Revendor everything

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>

Closes: #1277
Approved by: giuseppe
Daniel J Walsh 2019-01-11 12:13:10 -05:00 committed by Atomic Bot
parent 017dd1ffd4
commit 3eef1ed0bd
401 changed files with 163073 additions and 11057 deletions

View File

@ -1527,6 +1527,11 @@ func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
return lastErr
}
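// EnsureContainerPath verifies that the requested path already exists in the
// container by stat'ing it; any error from os.Stat is returned unchanged.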
func (b *Executor) EnsureContainerPath(path string) error {
_, err := os.Stat(path)
return err
}
// preprocessDockerfileContents runs CPP(1) in preprocess-only mode on the input
// dockerfile content and will use ctxDir as the base include path.
//

vendor/github.com/Nvveen/Gotty/README generated vendored Normal file
View File

@ -0,0 +1,5 @@
Gotty is a library written in Go that determines and reads termcap database
files to produce an interface for interacting with the capabilities of a
terminal.
See the godoc documentation or the source code for more information about
function usage.

View File

@ -1,6 +1,7 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@ -175,28 +176,16 @@
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Copyright The containerd Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -72,3 +72,13 @@ If you change the proto file you will need to rebuild the generated Go with `go
```console
$ go generate ./proto
```
## Project details
continuity is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
information in our [`containerd/project`](https://github.com/containerd/project) repository.

View File

@ -1,3 +1,19 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pathdriver
import (

View File

@ -10,4 +10,4 @@ github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
golang.org/x/sys 665f6529cca930e27b831a0d1dafffbe1c172924
golang.org/x/sys 77b0e4315053a57ed2962443614bdb28db152054

vendor/github.com/ghodss/yaml/README.md generated vendored Normal file
View File

@ -0,0 +1,121 @@
# YAML marshaling and unmarshaling support for Go
[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
## Introduction
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that, unlike go-yaml, it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
## Compatibility
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
## Caveats
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded by the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
```
BAD:
    exampleKey: !!binary gIGC

GOOD:
    exampleKey: gIGC

... and decode the base64 data in your code.
```
**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are themselves maps will result in an error, since JSON does not support such keys. The same error occurs in `Unmarshal`: map keys cannot be unmarshaled anyway, because struct fields can't serve as keys.
## Installation and usage
To install, run:
```
$ go get github.com/ghodss/yaml
```
And import using:
```
import "github.com/ghodss/yaml"
```
Usage is very similar to the JSON library:
```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type Person struct {
	Name string `json:"name"` // Affects YAML field names too.
	Age  int    `json:"age"`
}

func main() {
	// Marshal a Person struct to YAML.
	p := Person{"John", 30}
	y, err := yaml.Marshal(p)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(y))
	/* Output:
	age: 30
	name: John
	*/

	// Unmarshal the YAML back into a Person struct.
	var p2 Person
	err = yaml.Unmarshal(y, &p2)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(p2)
	/* Output:
	{John 30}
	*/
}
```
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	j := []byte(`{"name": "John", "age": 30}`)
	y, err := yaml.JSONToYAML(j)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(y))
	/* Output:
	name: John
	age: 30
	*/

	j2, err := yaml.YAMLToJSON(y)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(j2))
	/* Output:
	{"age":30,"name":"John"}
	*/
}
```

View File

@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strconv"
@ -26,15 +27,19 @@ func Marshal(o interface{}) ([]byte, error) {
return y, nil
}
// Converts YAML to JSON then uses JSON to unmarshal into an object.
func Unmarshal(y []byte, o interface{}) error {
// JSONOpt is a decoding option for decoding from JSON format.
type JSONOpt func(*json.Decoder) *json.Decoder
// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
// optionally configuring the behavior of the JSON unmarshal.
func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
vo := reflect.ValueOf(o)
j, err := yamlToJSON(y, &vo)
j, err := yamlToJSON(y, &vo, yaml.Unmarshal)
if err != nil {
return fmt.Errorf("error converting YAML to JSON: %v", err)
}
err = json.Unmarshal(j, o)
err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
if err != nil {
return fmt.Errorf("error unmarshaling JSON: %v", err)
}
@ -42,6 +47,21 @@ func Unmarshal(y []byte, o interface{}) error {
return nil
}
// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
// object, optionally applying decoder options prior to decoding. We are not
// using json.Unmarshal directly as we want the chance to pass in non-default
// options.
func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
d := json.NewDecoder(r)
for _, opt := range opts {
d = opt(d)
}
if err := d.Decode(&o); err != nil {
return fmt.Errorf("while decoding JSON: %v", err)
}
return nil
}
// Convert JSON to YAML.
func JSONToYAML(j []byte) ([]byte, error) {
// Convert the JSON to an object.
@ -60,8 +80,8 @@ func JSONToYAML(j []byte) ([]byte, error) {
return yaml.Marshal(jsonObj)
}
// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
// this method should be a no-op.
// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
@ -70,14 +90,22 @@ func JSONToYAML(j []byte) ([]byte, error) {
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
//
// For strict decoding of YAML, use YAMLToJSONStrict.
func YAMLToJSON(y []byte) ([]byte, error) {
return yamlToJSON(y, nil)
return yamlToJSON(y, nil, yaml.Unmarshal)
}
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
// returning an error on any duplicate field names.
func YAMLToJSONStrict(y []byte) ([]byte, error) {
return yamlToJSON(y, nil, yaml.UnmarshalStrict)
}
func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
// Convert the YAML to an object.
var yamlObj interface{}
err := yaml.Unmarshal(y, &yamlObj)
err := yamlUnmarshal(y, &yamlObj)
if err != nil {
return nil, err
}
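A hedged usage sketch of the new strict entry point (the duplicate-key input is a hypothetical example; `yaml.UnmarshalStrict` is documented above to reject duplicate field names):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// Duplicate "name" key: plain YAMLToJSON keeps the last value, while
	// YAMLToJSONStrict should reject the document.
	y := []byte("name: a\nname: b\n")

	if j, err := yaml.YAMLToJSON(y); err == nil {
		fmt.Println(string(j)) // {"name":"b"}
	}
	if _, err := yaml.YAMLToJSONStrict(y); err != nil {
		fmt.Println("strict error:", err)
	}
}
```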

vendor/github.com/ghodss/yaml/yaml_go110.go generated vendored Normal file
View File

@ -0,0 +1,14 @@
// This file contains changes that are only compatible with go 1.10 and onwards.
// +build go1.10
package yaml
import "encoding/json"
// DisallowUnknownFields configures the JSON decoder to error out if unknown
// fields come along, instead of dropping them by default.
func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
d.DisallowUnknownFields()
return d
}
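A minimal sketch of how this option composes with the variadic `Unmarshal` shown earlier (the `Config` struct and YAML input are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// Config is a hypothetical target struct.
type Config struct {
	Name string `json:"name"`
}

func main() {
	// "port" has no matching field in Config, so strict decoding fails.
	data := []byte("name: server\nport: 8080\n")

	var c Config
	if err := yaml.Unmarshal(data, &c, yaml.DisallowUnknownFields); err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(c)
}
```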

View File

@ -1,160 +1,160 @@
# compress
This package is based on an optimized Deflate function, which is used by gzip/zip/zlib packages.
It offers slightly better compression at lower compression settings, and up to 3x faster encoding at highest compression level.
* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
[![Build Status](https://travis-ci.org/klauspost/compress.svg?branch=master)](https://travis-ci.org/klauspost/compress)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
# changelog
* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
* May 28, 2017: Reduce allocations when resetting decoder.
* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
* Mar 24, 2016: Small speedup for level 1-3.
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%.
* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update!
* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
# usage
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import | new import |
|--------------------|-----------------------------------------|
| `compress/gzip` | `github.com/klauspost/compress/gzip` |
| `compress/zlib` | `github.com/klauspost/compress/zlib` |
| `archive/zip` | `github.com/klauspost/compress/zip` |
| `compress/flate` | `github.com/klauspost/compress/flate` |
You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
The packages contain the same functionality as the standard library, so you can use its godoc: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
Currently there is only minor speedup on decompression (mostly CRC32 calculation).
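A minimal round-trip sketch of the drop-in claim — the calls are the standard-library gzip API; only the import path changes:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	gzip "github.com/klauspost/compress/gzip" // drop-in for compress/gzip
)

func main() {
	var buf bytes.Buffer

	// Compress with the replacement package; the API is the standard one.
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello, hello, hello, world\n"))
	zw.Close()

	// Decompress and verify the round trip.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		fmt.Println(err)
		return
	}
	out, _ := ioutil.ReadAll(zr)
	fmt.Printf("round trip: %q\n", out)
}
```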
# Performance Update 2018
It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
The `gzstd` (standard library gzip) and `gzkp` (this package's gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
## Overall differences.
There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, "9" is "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
## Web Content
This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
## Object files
This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
## Highly Compressible File
This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
## Medium-High Compressible
This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
## Medium Compressible
I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
## Un-compressible Content
This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
# linear time compression (huffman only)
This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
The linear-time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
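A short sketch of selecting this mode via the flate drop-in (assuming the package exposes the `HuffmanOnly` level named above, mirroring the standard library constant):

```go
package main

import (
	"bytes"
	"fmt"

	flate "github.com/klauspost/compress/flate" // drop-in for compress/flate
)

func main() {
	var buf bytes.Buffer

	// HuffmanOnly skips all match searching; only entropy coding runs,
	// so throughput is nearly independent of the input.
	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		fmt.Println(err)
		return
	}
	in := bytes.Repeat([]byte("abcdefgh"), 1024)
	w.Write(in)
	w.Close()

	fmt.Printf("%d bytes -> %d bytes\n", len(in), buf.Len())
}
```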
# snappy package
The standard snappy package has now been improved. This repo contains a copy of the snappy repo.
I would advise to use the standard package: https://github.com/golang/snappy
# license
This code is licensed under the same conditions as the original Go code. See LICENSE file.

vendor/github.com/moby/moby/hack/README.md generated vendored Normal file
View File

@ -0,0 +1,60 @@
## About
This directory contains a collection of scripts used to build and manage this
repository. If there are any issues regarding the intention of a particular
script (or even part of a certain script), please reach out to us.
It may help us either refine our current scripts, or add on new ones
that are appropriate for a given use case.
## DinD (dind.sh)
DinD is a wrapper script which allows Docker to be run inside a Docker
container. DinD requires the container to
be run with privileged mode enabled.
## Generate Authors (generate-authors.sh)
Generates AUTHORS, a file with all the names and corresponding emails of
individual contributors. AUTHORS can be found in the home directory of
this repository.
## Make
There are two make files, each with a different extension. Neither is supposed
to be called directly; only invoke `make`. Both scripts run inside a Docker
container.
### make.ps1
- The native Windows build script; it uses PowerShell semantics and, unlike
`hack\make.sh`, does not support the full set of operations provided by the
Linux counterpart, `make.sh`. However, `make.ps1` does support local Windows
development and Windows-to-Windows CI. More information from the author,
@jhowardmsft, can be found within `make.ps1`.
### make.sh
- Referenced via `make test` when running tests on a local machine,
or directly referenced when running tests inside a Docker development container.
- When running on a local machine, use `make test` to run all tests found in
`test`, `test-unit`, `test-integration`, and `test-docker-py`. The default
timeout is set in `make.sh` to 60 minutes
(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
all of the tests.
- When running inside a Docker development container, `hack/make.sh` does
not have a single target that runs all the tests. You need to provide a
single command line with multiple targets that performs the same thing.
An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration test-docker-py`
- For more information related to testing outside the scope of this README,
refer to
[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
## Release (release.sh)
Releases any bundles built by `make` to a public AWS S3 bucket.
For information regarding configuration, please view `release.sh`.
## Vendor (vendor.sh)
A shell script that is a wrapper around Vndr. For information on how to use
this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)

View File

@ -0,0 +1,69 @@
# Integration Testing on Swarm
IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster.
## Architecture
### Master service
- Works as a funker caller
- Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
### Worker service
- Works as a funker callee
- Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
### Client
- Controls master and workers via `docker stack`
- No need to have a local daemon
Typically, the master and workers run in a cloud environment,
while the client runs on a laptop, e.g. Docker for Mac/Windows.
## Requirements
- Docker daemon 1.13 or later
- Private registry for distributed execution with multiple nodes
## Usage
### Step 1: Prepare images
$ make build-integration-cli-on-swarm
The following environment variables are known to work in this step:
- `BUILDFLAGS`
- `DOCKER_INCREMENTAL_BINARY`
Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
### Step 2: Execute tests
$ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
The following environment variables are known to work in this step:
- `DOCKER_GRAPHDRIVER`
- `DOCKER_EXPERIMENTAL`
#### Flags
Basic flags:
- `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism.
- `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
- `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only a single node and hence do not need a private registry, you do not need to specify `-push-worker-image`.
Experimental flags for mitigating makespan nonuniformity:
- `-shuffle`: Shuffle the test filter strings
Flags for debugging IT on Swarm itself:
- `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
- `-filters-file FILE`: the file containing `-check.f` strings. By default, the file is automatically generated.
- `-dry-run`: skip the actual workload
- `-keep-executor`: do not auto-remove executor containers, which is useful when running privileged programs on Swarm

View File

@ -0,0 +1,2 @@
# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773

vendor/github.com/mtrmac/gpgme/README.md generated vendored Normal file
View File

@ -0,0 +1,13 @@
# GPGME (golang)
Go wrapper for the GPGME library.
This library is intended for use with desktop applications. If you are looking to add OpenPGP support to a server application I suggest you first look at [golang.org/x/crypto/openpgp](https://godoc.org/golang.org/x/crypto/openpgp).
## Installation
go get -u github.com/proglottis/gpgme
## Documentation
* [godoc](https://godoc.org/github.com/proglottis/gpgme)

View File

@ -68,6 +68,7 @@ make BUILDTAGS='seccomp apparmor'
| selinux | selinux process and mount labeling | <none> |
| apparmor | apparmor profile support | <none> |
| ambient | ambient capability support | kernel 4.3 |
| nokmem | disable kernel memory accounting | <none> |
### Running the test suite
@ -263,3 +264,7 @@ PIDFile=/run/mycontainerid.pid
[Install]
WantedBy=multi-user.target
```
## License
The code and docs are released under the [Apache 2.0 license](LICENSE).

View File

@ -148,6 +148,7 @@ config := &configs.Config{
{Type: configs.NEWPID},
{Type: configs.NEWUSER},
{Type: configs.NEWNET},
{Type: configs.NEWCGROUP},
}),
Cgroups: &configs.Cgroup{
Name: "test-container",
@ -323,6 +324,7 @@ generated when building libcontainer with docker.
## Copyright and license
Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license.
Docs released under Creative commons.
Code and documentation copyright 2014 Docker, inc.
The code and documentation are released under the [Apache 2.0 license](../LICENSE).
The documentation is also released under Creative Commons Attribution 4.0 International License.
You may obtain a copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.

View File

@ -10,8 +10,8 @@ The `nsenter` package will `import "C"` and it uses [cgo](https://golang.org/cmd
package. In cgo, if the import of "C" is immediately preceded by a comment, that comment,
called the preamble, is used as a header when compiling the C parts of the package.
So every time we import package `nsenter`, the C code function `nsexec()` would be
called. And package `nsenter` is now only imported in `main_unix.go`, so every time
before we call `cmd.Start` on linux, that C code would run.
called. And package `nsenter` is only imported in `init.go`, so every time the runc
`init` command is invoked, that C code is run.
Because `nsexec()` must be run before the Go runtime in order to use the
Linux kernel namespace, you must `import` this library into a package if
@ -37,7 +37,7 @@ the parent `nsexec()` will exit and the child `nsexec()` process will
return to allow the Go runtime to take over.
NOTE: We do both `setns(2)` and `clone(2)` even if we don't have any
CLONE_NEW* clone flags because we must fork a new process in order to
`CLONE_NEW*` clone flags because we must fork a new process in order to
enter the PID namespace.
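For readers unfamiliar with the mechanism, a self-contained illustration (not the runc code itself) of a cgo preamble running C before the Go runtime via a constructor — the same trick `nsexec()` relies on:

```go
package main

/*
#include <stdio.h>

// Runs when the binary is loaded, before the Go runtime starts — the same
// constructor trick the nsenter preamble uses to invoke nsexec() early.
__attribute__((constructor)) static void early(void) {
	fprintf(stderr, "C constructor: runs before Go main\n");
}
*/
import "C"

import "fmt"

func main() {
	fmt.Println("Go main: runs after the C constructor")
}
```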

View File

@ -42,6 +42,12 @@ enum sync_t {
SYNC_ERR = 0xFF, /* Fatal error, no turning back. The error code follows. */
};
/*
* Synchronisation value for cgroup namespace setup.
* The same constant is defined in process_linux.go as "createCgroupns".
*/
#define CREATECGROUPNS 0x80
/* longjmp() arguments. */
#define JUMP_PARENT 0x00
#define JUMP_CHILD 0xA0
@ -82,7 +88,7 @@ struct nlconfig_t {
uint8_t is_setgroup;
/* Rootless container settings. */
uint8_t is_rootless;
uint8_t is_rootless_euid; /* boolean */
char *uidmappath;
size_t uidmappath_len;
char *gidmappath;
@ -100,7 +106,7 @@ struct nlconfig_t {
#define GIDMAP_ATTR 27284
#define SETGROUP_ATTR 27285
#define OOM_SCORE_ADJ_ATTR 27286
#define ROOTLESS_ATTR 27287
#define ROOTLESS_EUID_ATTR 27287
#define UIDMAPPATH_ATTR 27288
#define GIDMAPPATH_ATTR 27289
@ -211,7 +217,7 @@ static int try_mapping_tool(const char *app, int pid, char *map, size_t map_len)
/*
* If @app is NULL, execve will segfault. Just check it here and bail (if
* we're in this path, the caller is already getting desparate and there
* we're in this path, the caller is already getting desperate and there
* isn't a backup to this failing). This usually would be a configuration
* or programming issue.
*/
@ -419,8 +425,8 @@ static void nl_parse(int fd, struct nlconfig_t *config)
case CLONE_FLAGS_ATTR:
config->cloneflags = readint32(current);
break;
case ROOTLESS_ATTR:
config->is_rootless = readint8(current);
case ROOTLESS_EUID_ATTR:
config->is_rootless_euid = readint8(current); /* boolean */
break;
case OOM_SCORE_ADJ_ATTR:
config->oom_score_adj = current;
@ -640,7 +646,6 @@ void nsexec(void)
case JUMP_PARENT:{
int len;
pid_t child, first_child = -1;
char buf[JSON_MAX];
bool ready = false;
/* For debugging. */
@ -687,7 +692,7 @@ void nsexec(void)
* newuidmap/newgidmap shall be used.
*/
if (config.is_rootless && !config.is_setgroup)
if (config.is_rootless_euid && !config.is_setgroup)
update_setgroups(child, SETGROUPS_DENY);
/* Set up mappings. */
@ -716,6 +721,18 @@ void nsexec(void)
kill(child, SIGKILL);
bail("failed to sync with child: write(SYNC_RECVPID_ACK)");
}
/* Send the init_func pid back to our parent.
*
* Send the init_func pid and the pid of the first child back to our parent.
* We need to send both back because we can't reap the first child we created (CLONE_PARENT).
* It becomes the responsibility of our parent to reap the first child.
*/
len = dprintf(pipenum, "{\"pid\": %d, \"pid_first\": %d}\n", child, first_child);
if (len < 0) {
kill(child, SIGKILL);
bail("unable to generate JSON for child pid");
}
}
break;
case SYNC_CHILD_READY:
@ -759,23 +776,6 @@ void nsexec(void)
bail("unexpected sync value: %u", s);
}
}
/*
* Send the init_func pid and the pid of the first child back to our parent.
*
* We need to send both back because we can't reap the first child we created (CLONE_PARENT).
* It becomes the responsibility of our parent to reap the first child.
*/
len = snprintf(buf, JSON_MAX, "{\"pid\": %d, \"pid_first\": %d}\n", child, first_child);
if (len < 0) {
kill(child, SIGKILL);
bail("unable to generate JSON for child pid");
}
if (write(pipenum, buf, len) != len) {
kill(child, SIGKILL);
bail("unable to send child pid to bootstrapper");
}
exit(0);
}
@ -862,14 +862,17 @@ void nsexec(void)
if (setresuid(0, 0, 0) < 0)
bail("failed to become root in user namespace");
}
/*
* Unshare all of the namespaces. Note that we don't merge this
* with clone() because there were some old kernel versions where
* clone(CLONE_PARENT | CLONE_NEWPID) was broken, so we'll just do
* it the long way.
* Unshare all of the namespaces. Now, it should be noted that this
* ordering might break in the future (especially with rootless
* containers). But for now, it's not possible to split this into
* CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues.
*
* Note that we don't merge this with clone() because there were
* some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID)
* was broken, so we'll just do it the long way anyway.
*/
if (unshare(config.cloneflags) < 0)
if (unshare(config.cloneflags & ~CLONE_NEWCGROUP) < 0)
bail("failed to unshare namespaces");
/*
@ -953,11 +956,23 @@ void nsexec(void)
if (setgid(0) < 0)
bail("setgid failed");
if (!config.is_rootless && config.is_setgroup) {
if (!config.is_rootless_euid && config.is_setgroup) {
if (setgroups(0, NULL) < 0)
bail("setgroups failed");
}
/* ... wait until our topmost parent has finished cgroup setup in p.manager.Apply() ... */
if (config.cloneflags & CLONE_NEWCGROUP) {
uint8_t value;
if (read(pipenum, &value, sizeof(value)) != sizeof(value))
bail("read synchronisation value failed");
if (value == CREATECGROUPNS) {
if (unshare(CLONE_NEWCGROUP) < 0)
bail("failed to unshare cgroup namespace");
} else
bail("received unknown synchronisation value");
}
s = SYNC_CHILD_READY;
if (write(syncfd, &s, sizeof(s)) != sizeof(s))
bail("failed to sync with patent: write(SYNC_CHILD_READY)");

View File

@ -5,6 +5,7 @@ package user
import (
"io"
"os"
"strconv"
"golang.org/x/sys/unix"
)
@ -115,22 +116,23 @@ func CurrentGroup() (Group, error) {
return LookupGid(unix.Getgid())
}
func CurrentUserSubUIDs() ([]SubID, error) {
func currentUserSubIDs(fileName string) ([]SubID, error) {
u, err := CurrentUser()
if err != nil {
return nil, err
}
return ParseSubIDFileFilter("/etc/subuid",
func(entry SubID) bool { return entry.Name == u.Name })
filter := func(entry SubID) bool {
return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid)
}
return ParseSubIDFileFilter(fileName, filter)
}
func CurrentGroupSubGIDs() ([]SubID, error) {
g, err := CurrentGroup()
if err != nil {
return nil, err
}
return ParseSubIDFileFilter("/etc/subgid",
func(entry SubID) bool { return entry.Name == g.Name })
func CurrentUserSubUIDs() ([]SubID, error) {
return currentUserSubIDs("/etc/subuid")
}
func CurrentUserSubGIDs() ([]SubID, error) {
return currentUserSubIDs("/etc/subgid")
}
func CurrentProcessUIDMap() ([]IDMap, error) {
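A small usage sketch of the refactored helpers (import path as vendored here; `SubID` fields per this package):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Both helpers now share currentUserSubIDs, matching /etc/subuid and
	// /etc/subgid entries by user name or by numeric UID.
	ranges, err := user.CurrentUserSubUIDs()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, r := range ranges {
		fmt.Printf("%s: start %d, count %d\n", r.Name, r.SubID, r.Count)
	}
}
```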

View File

@ -1,7 +1,7 @@
# OCI runtime-spec. When updating this, make sure you use a version tag rather
# than a commit ID so it's much more obvious what version of the spec we are
# using.
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-spec 5684b8af48c1ac3b1451fa499724e30e3c20a294
# Core libcontainer functionality.
github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08
github.com/opencontainers/selinux v1.0.0-rc1

View File

@ -70,7 +70,7 @@ is ignored.
## Code Example
```
```go
f, err := os.Open("path/to/Dockerfile")
if err != nil {
return err

View File

@ -40,6 +40,7 @@ type Run struct {
type Executor interface {
Preserve(path string) error
EnsureContainerPath(path string) error
Copy(excludes []string, copies ...Copy) error
Run(run Run, config docker.Config) error
UnrecognizedInstruction(step *Step) error
@ -52,6 +53,11 @@ func (logExecutor) Preserve(path string) error {
return nil
}
func (logExecutor) EnsureContainerPath(path string) error {
log.Printf("ENSURE %s", path)
return nil
}
func (logExecutor) Copy(excludes []string, copies ...Copy) error {
for _, c := range copies {
log.Printf("COPY %v -> %s (from:%s download:%t), chown: %s", c.Src, c.Dest, c.From, c.Download, c.Chown)
@ -75,6 +81,10 @@ func (noopExecutor) Preserve(path string) error {
return nil
}
func (noopExecutor) EnsureContainerPath(path string) error {
return nil
}
func (noopExecutor) Copy(excludes []string, copies ...Copy) error {
return nil
}
@ -319,6 +329,13 @@ func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error {
if err := exec.Copy(b.Excludes, copies...); err != nil {
return err
}
if len(b.RunConfig.WorkingDir) > 0 {
if err := exec.EnsureContainerPath(b.RunConfig.WorkingDir); err != nil {
return err
}
}
for _, run := range runs {
config := b.Config()
config.Env = step.Env
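To make the new hook concrete, a hypothetical `Executor` fragment that creates the working directory on demand — buildah's implementation earlier in this commit merely stats the path:

```go
package builder

import (
	"os"
	"path/filepath"
)

// dirEnsurer is a hypothetical executor fragment: EnsureContainerPath
// creates WORKDIR targets under the container root if they are missing.
type dirEnsurer struct {
	root string // mounted container filesystem root
}

func (d dirEnsurer) EnsureContainerPath(path string) error {
	return os.MkdirAll(filepath.Join(d.root, path), 0755)
}
```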

vendor/github.com/pkg/errors/README.md generated vendored Normal file
View File

@ -0,0 +1,59 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
Package errors provides simple error handling primitives.
`go get github.com/pkg/errors`
The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
	return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
## Adding context to an error
The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
	return errors.Wrap(err, "read failed")
}
```
## Retrieving the cause of an error
Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
	Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
	// handle specifically
default:
	// unknown error
}
```
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
## Roadmap
With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
- 0.9. Remove pre Go 1.9 support, address outstanding pull requests (if possible)
- 1.0. Final release.
## Contributing
Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
Before sending a PR, please discuss your change by raising an issue.
## License
BSD-2-Clause

View File

@ -6,7 +6,7 @@
// return err
// }
//
// which applied recursively up the call stack results in error reports
// which when applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
@ -15,16 +15,17 @@
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// and the supplied message. For example
// together with the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// If additional control is required the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and an a message, respectively.
// If additional control is required, the errors.WithStack and
// errors.WithMessage functions destructure errors.Wrap into its component
// operations: annotating an error with a stack trace and with a message,
// respectively.
//
// Retrieving the cause of an error
//
@ -38,7 +39,7 @@
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the topmost error that does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
@ -48,16 +49,16 @@
// // unknown error
// }
//
// causer interface is not exported by this package, but is considered a part
// of stable public API.
// Although the causer interface is not exported by this package, it is
// considered a part of its stable public interface.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
// be formatted by the fmt package. The following verbs are supported:
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// printed recursively.
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
@ -65,13 +66,13 @@
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
// invoked. This information can be retrieved with the following interface:
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
// The returned errors.StackTrace type is defined as
//
// type StackTrace []Frame
//
@ -81,12 +82,12 @@
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// fmt.Printf("%+s:%d\n", f, f)
// }
// }
//
// stackTracer interface is not exported by this package, but is considered a part
// of stable public API.
// Although the stackTracer interface is not exported by this package, it is
// considered a part of its stable public interface.
//
// See the documentation for Frame.Format for more details.
package errors
@ -192,7 +193,7 @@ func Wrap(err error, message string) error {
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is call, and the format specifier.
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
@ -220,6 +221,18 @@ func WithMessage(err error, message string) error {
}
}
// WithMessagef annotates err with the format specifier.
// If err is nil, WithMessagef returns nil.
func WithMessagef(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
return &withMessage{
cause: err,
msg: fmt.Sprintf(format, args...),
}
}
type withMessage struct {
cause error
msg string

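The newly added WithMessagef behaves like WithMessage but formats its annotation, so callers can write, for example (a minimal sketch using only APIs shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	base := errors.New("permission denied")

	// Annotate without recording a new stack trace; the message is
	// built with fmt.Sprintf-style formatting.
	err := errors.WithMessagef(base, "opening %s as uid %d", "/etc/shadow", 1000)
	fmt.Println(err) // opening /etc/shadow as uid 1000: permission denied
}
```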

@ -5,10 +5,13 @@ import (
"io"
"path"
"runtime"
"strconv"
"strings"
)
// Frame represents a program counter inside a stack frame.
// For historical reasons if Frame is interpreted as a uintptr
// its value represents the program counter + 1.
type Frame uintptr
// pc returns the program counter for this frame;
@ -37,6 +40,15 @@ func (f Frame) line() int {
return line
}
// name returns the name of this function, if known.
func (f Frame) name() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
return fn.Name()
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
@ -46,29 +58,24 @@ func (f Frame) line() int {
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s path of source file relative to the compile time GOPATH
// %+s function name and path of source file relative to the compile time
// GOPATH separated by \n\t (<funcname>\n\t<path>)
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
}
io.WriteString(s, f.name())
io.WriteString(s, "\n\t")
io.WriteString(s, f.file())
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
io.WriteString(s, strconv.Itoa(f.line()))
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
io.WriteString(s, funcname(f.name()))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
@ -79,24 +86,46 @@ func (f Frame) Format(s fmt.State, verb rune) {
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
// Format formats the stack of Frames according to the fmt.Formatter interface.
//
// %s lists source files for each Frame in the stack
// %v lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+v Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
fmt.Fprintf(s, "\n%+v", f)
io.WriteString(s, "\n")
f.Format(s, verb)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
fmt.Fprintf(s, "%v", []Frame(st))
st.formatSlice(s, verb)
}
case 's':
fmt.Fprintf(s, "%s", []Frame(st))
st.formatSlice(s, verb)
}
}
// formatSlice will format this StackTrace into the given buffer as a slice of
// Frame, only valid when called with '%s' or '%v'.
func (st StackTrace) formatSlice(s fmt.State, verb rune) {
io.WriteString(s, "[")
for i, f := range st {
if i > 0 {
io.WriteString(s, " ")
}
f.Format(s, verb)
}
io.WriteString(s, "]")
}
// stack represents a stack of program counters.
type stack []uintptr
@ -136,43 +165,3 @@ func funcname(name string) string {
i = strings.Index(name, ".")
return name[i+1:]
}
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until it finds two more than in the function name and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}

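Putting the Frame/StackTrace formatting above to work, a caller can recover and print a stack trace through the documented (unexported) stackTracer interface; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the interface documented above; errors produced by
// New, Errorf, Wrap and Wrapf satisfy it.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("whoops")
	if st, ok := err.(stackTracer); ok {
		for _, f := range st.StackTrace() {
			fmt.Printf("%+s:%d\n", f, f) // function, file and line per frame
		}
	}
}
```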

@ -1,13 +1,14 @@
`containers-golang` is a set of Go libraries used by container runtimes to generate and load seccomp mappings into the kernel.
seccomp (short for secure computing mode) is a computer security facility in the Linux kernel. It was merged into the Linux kernel mainline in kernel version 2.6.12, which was released on March 8, 2005.[1] seccomp allows a process to make a one-way transition into a "secure" state where it cannot make any system calls except exit(), sigreturn(), read() and write() to already-open file descriptors. Should it attempt any other system calls, the kernel will terminate the process with SIGKILL or SIGSYS[2][3]. In this sense, it does not virtualize the system's resources but isolates the process from them entirely.
## Dependencies
seccomp (short for secure computing mode) is a BPF-based syscall filter language and presents a more conventional function-call-based filtering interface that should be familiar to, and easily adopted by, application developers.
## Building
make - Generates the default.json file, which contains the whitelisted syscalls that can be used by container runtime engines like [CRI-O][cri-o], [Buildah][buildah], [Podman][podman] and [Docker][docker], and container runtimes like OCI [Runc][runc] to control the syscalls available to containers.
### Supported build tags
`seccomp`
## Contributing
When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
@ -19,3 +20,10 @@ ASL 2.0
## Contact
- IRC: #[CRI-O](irc://irc.freenode.net:6667/#cri-o) on freenode.net
[cri-o]: https://github.com/kubernetes-incubator/cri-o/pulls
[buildah]: https://github.com/projectatomic/buildah
[podman]: https://github.com/projectatomic/podman
[docker]: https://github.com/docker/docker
[runc]: https://github.com/opencontainers/runc

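For consumers, the generated default.json is plain JSON in the Docker/OCI seccomp profile format, so a runtime can load it with the standard library. A minimal sketch follows; the field names below follow that format and are assumptions here, not this library's own types.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

// profile is a deliberately minimal view of the generated file; real
// profiles carry more fields (architectures, args, and so on).
type profile struct {
	DefaultAction string `json:"defaultAction"`
	Syscalls      []struct {
		Names  []string `json:"names"`
		Action string   `json:"action"`
	} `json:"syscalls"`
}

func main() {
	raw, err := ioutil.ReadFile("default.json")
	if err != nil {
		panic(err)
	}
	var p profile
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	fmt.Printf("default action %s, %d syscall rules\n", p.DefaultAction, len(p.Syscalls))
}
```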

@ -56,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```
To ensure this behaviour even if a TTY is attached, set your formatter as follows:
```go
log.SetFormatter(&log.TextFormatter{
DisableColors: true,
FullTimestamp: true,
})
```
#### Logging Method Name
If you wish to add the calling method as a field, instruct the logger via:
```go
log.SetReportCaller(true)
```
This adds the caller as 'method' like so:
```json
{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
```
```text
time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
```
Note that this does add measurable overhead - the cost will depend on the version of Go, but is
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
environment via benchmarks:
```
go test -bench=.*CallerTracing
```
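Tying the section together, here is a minimal runnable program that enables caller reporting with JSON output, using only the standard logrus APIs shown above (the field values are illustrative):

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Include the calling function in every entry, as described under
	// "Logging Method Name" above.
	log.SetReportCaller(true)
	log.SetFormatter(&log.JSONFormatter{})

	log.WithField("animal", "penguin").Info("a penguin swims by")
}
```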
#### Case-sensitivity
@ -220,7 +251,7 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
```go
import (
log "github.com/sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
"log/syslog"
)
@ -241,62 +272,15 @@ func init() {
```
Note: The syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
| Hook | Description |
| ----- | ----------- |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage|
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
A list of currently known service hooks can be found on this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Trace("Something very low level.")
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
@ -368,13 +352,16 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
* When colors are enabled, levels are truncated to 4 characters by default. To disable
truncation set the `DisableLevelTruncation` field to `true`.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
Third party logging formatters:
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can by parsed by Kubernetes and Google Container Engine.
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
@ -491,7 +478,7 @@ logrus.RegisterExitHandler(handler)
#### Thread safety
By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
Situations when locking is not needed include:


@ -4,11 +4,30 @@ import (
"bytes"
"fmt"
"os"
"reflect"
"runtime"
"strings"
"sync"
"time"
)
var bufferPool *sync.Pool
var (
bufferPool *sync.Pool
// qualified package name, cached at first use
logrusPackage string
// Positions in the call stack when tracing to report the calling method
minimumCallerDepth int
// Used for caller information initialisation
callerInitOnce sync.Once
)
const (
maximumCallerDepth int = 25
knownLogrusFrames int = 4
)
func init() {
bufferPool = &sync.Pool{
@ -16,15 +35,18 @@ func init() {
return new(bytes.Buffer)
},
}
// start at the bottom of the stack before the package-name cache is primed
minimumCallerDepth = 1
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
// reused and passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
@ -34,22 +56,28 @@ type Entry struct {
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
// Calling method, with package name
Caller *runtime.Frame
// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
Message string
// When formatter is called in entry.log(), an Buffer may be set to entry
// When formatter is called in entry.log(), a Buffer may be set to entry
Buffer *bytes.Buffer
// err may contain a field formatting error
err string
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
// Default is three fields, plus one optional. Give a little extra room.
Data: make(Fields, 6),
}
}
@ -80,46 +108,117 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range entry.Data {
data[k] = v
}
fieldErr := entry.err
for k, v := range fields {
data[k] = v
isErrField := false
if t := reflect.TypeOf(v); t != nil {
switch t.Kind() {
case reflect.Func:
isErrField = true
case reflect.Ptr:
isErrField = t.Elem().Kind() == reflect.Func
}
}
if isErrField {
tmp := fmt.Sprintf("can not add field %q", k)
if fieldErr != "" {
fieldErr = entry.err + ", " + tmp
} else {
fieldErr = tmp
}
} else {
data[k] = v
}
}
return &Entry{Logger: entry.Logger, Data: data}
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr}
}
// Overrides the time of the Entry.
func (entry *Entry) WithTime(t time.Time) *Entry {
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err}
}
// getPackageName reduces a fully qualified function name to the package name
// There really ought to be a better way...
func getPackageName(f string) string {
for {
lastPeriod := strings.LastIndex(f, ".")
lastSlash := strings.LastIndex(f, "/")
if lastPeriod > lastSlash {
f = f[:lastPeriod]
} else {
break
}
}
return f
}
// getCaller retrieves the name of the first non-logrus calling function
func getCaller() *runtime.Frame {
// Restrict the lookback frames to avoid runaway lookups
pcs := make([]uintptr, maximumCallerDepth)
depth := runtime.Callers(minimumCallerDepth, pcs)
frames := runtime.CallersFrames(pcs[:depth])
// cache this package's fully-qualified name
callerInitOnce.Do(func() {
logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
// now that we have the cache, we can skip a minimum count of known-logrus functions
// XXX this is dubious, the number of frames may vary if an entry is stored in a logger interface
minimumCallerDepth = knownLogrusFrames
})
for f, again := frames.Next(); again; f, again = frames.Next() {
pkg := getPackageName(f.Function)
// If the caller isn't part of this package, we're done
if pkg != logrusPackage {
return &f
}
}
// if we got here, we failed to find the caller's context
return nil
}
func (entry Entry) HasCaller() (has bool) {
return entry.Logger != nil &&
entry.Logger.ReportCaller &&
entry.Caller != nil
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
entry.Time = time.Now()
// Default to now, but allow users to override if they want.
//
// We don't have to worry about polluting future calls to Entry#log()
// with this assignment because this function is declared with a
// non-pointer receiver.
if entry.Time.IsZero() {
entry.Time = time.Now()
}
entry.Level = level
entry.Message = msg
entry.Logger.mu.Lock()
err := entry.Logger.Hooks.Fire(level, &entry)
entry.Logger.mu.Unlock()
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
if entry.Logger.ReportCaller {
entry.Caller = getCaller()
}
entry.fireHooks()
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
serialized, err := entry.Logger.Formatter.Format(&entry)
entry.write()
entry.Buffer = nil
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
} else {
entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
entry.Logger.mu.Unlock()
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
@ -129,26 +228,53 @@ func (entry Entry) log(level Level, msg string) {
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
func (entry *Entry) fireHooks() {
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
err := entry.Logger.Hooks.Fire(entry.Level, entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
}
}
func (entry *Entry) write() {
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
} else {
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
}
}
func (entry *Entry) Log(level Level, args ...interface{}) {
if entry.Logger.IsLevelEnabled(level) {
entry.log(level, fmt.Sprint(args...))
}
}
func (entry *Entry) Trace(args ...interface{}) {
entry.Log(TraceLevel, args...)
}
func (entry *Entry) Debug(args ...interface{}) {
entry.Log(DebugLevel, args...)
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
entry.Log(InfoLevel, args...)
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
entry.Log(WarnLevel, args...)
}
func (entry *Entry) Warning(args ...interface{}) {
@ -156,37 +282,35 @@ func (entry *Entry) Warning(args ...interface{}) {
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
entry.Log(ErrorLevel, args...)
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
entry.Log(FatalLevel, args...)
entry.Logger.Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
entry.Log(PanicLevel, args...)
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
entry.Log(level, fmt.Sprintf(format, args...))
}
func (entry *Entry) Tracef(format string, args ...interface{}) {
entry.Logf(TraceLevel, format, args...)
}
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
entry.Logf(DebugLevel, format, args...)
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
entry.Logf(InfoLevel, format, args...)
}
func (entry *Entry) Printf(format string, args ...interface{}) {
@ -194,9 +318,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
entry.Logf(WarnLevel, format, args...)
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
@ -204,36 +326,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
entry.Logf(ErrorLevel, format, args...)
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
entry.Logf(FatalLevel, format, args...)
entry.Logger.Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
entry.Logf(PanicLevel, format, args...)
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
func (entry *Entry) Logln(level Level, args ...interface{}) {
if entry.Logger.IsLevelEnabled(level) {
entry.Log(level, entry.sprintlnn(args...))
}
}
func (entry *Entry) Traceln(args ...interface{}) {
entry.Logln(TraceLevel, args...)
}
func (entry *Entry) Debugln(args ...interface{}) {
entry.Logln(DebugLevel, args...)
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
entry.Logln(InfoLevel, args...)
}
func (entry *Entry) Println(args ...interface{}) {
@ -241,9 +363,7 @@ func (entry *Entry) Println(args ...interface{}) {
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
entry.Logln(WarnLevel, args...)
}
func (entry *Entry) Warningln(args ...interface{}) {
@ -251,22 +371,16 @@ func (entry *Entry) Warningln(args ...interface{}) {
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
entry.Logln(ErrorLevel, args...)
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
entry.Logln(FatalLevel, args...)
entry.Logger.Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
entry.Logln(PanicLevel, args...)
}
// Sprintlnn => Sprint no newline. This is to get the behavior of how

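Among the entry-level additions above, WithTime lets callers stamp an entry with an explicit time instead of time.Now(); a minimal sketch:

```go
package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Backfill an imported event with its original timestamp rather
	// than the moment it is logged.
	when := time.Date(2019, time.January, 11, 12, 13, 10, 0, time.UTC)
	log.WithTime(when).WithField("source", "replay").Info("imported event")
}
```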

@ -2,6 +2,7 @@ package logrus
import (
"io"
"time"
)
var (
@ -15,37 +16,38 @@ func StandardLogger() *Logger {
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
std.SetOutput(out)
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
std.SetFormatter(formatter)
}
// SetReportCaller sets whether the standard logger will include the calling
// method as a field.
func SetReportCaller(include bool) {
std.SetReportCaller(include)
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.SetLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.level()
return std.GetLevel()
}
// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
func IsLevelEnabled(level Level) bool {
return std.IsLevelEnabled(level)
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
std.AddHook(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
@ -72,6 +74,20 @@ func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// WithTime creates an entry from the standard logger and overrides the time of
// logs generated with it.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithTime(t time.Time) *Entry {
return std.WithTime(t)
}
// Trace logs a message at level Trace on the standard logger.
func Trace(args ...interface{}) {
std.Trace(args...)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
@ -107,11 +123,16 @@ func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Tracef logs a message at level Trace on the standard logger.
func Tracef(format string, args ...interface{}) {
std.Tracef(format, args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
@ -147,11 +168,16 @@ func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Traceln logs a message at level Trace on the standard logger.
func Traceln(args ...interface{}) {
std.Traceln(args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
@ -187,7 +213,7 @@ func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}

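The standard-logger surface now exposes the Trace level and level introspection; for example:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetLevel(log.TraceLevel)

	// Guard expensive work behind the new level check.
	if log.IsLevelEnabled(log.TraceLevel) {
		log.Trace("something very low level")
	}
}
```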

@ -2,7 +2,16 @@ package logrus
import "time"
const defaultTimestampFormat = time.RFC3339
// Default key names for the default fields
const (
defaultTimestampFormat = time.RFC3339
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
FieldKeyLogrusError = "logrus_error"
FieldKeyFunc = "func"
FieldKeyFile = "file"
)
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
@ -18,7 +27,7 @@ type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
@ -30,16 +39,40 @@ type Formatter interface {
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
if t, ok := data["time"]; ok {
data["fields.time"] = t
func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
timeKey := fieldMap.resolve(FieldKeyTime)
if t, ok := data[timeKey]; ok {
data["fields."+timeKey] = t
delete(data, timeKey)
}
if m, ok := data["msg"]; ok {
data["fields.msg"] = m
msgKey := fieldMap.resolve(FieldKeyMsg)
if m, ok := data[msgKey]; ok {
data["fields."+msgKey] = m
delete(data, msgKey)
}
if l, ok := data["level"]; ok {
data["fields.level"] = l
levelKey := fieldMap.resolve(FieldKeyLevel)
if l, ok := data[levelKey]; ok {
data["fields."+levelKey] = l
delete(data, levelKey)
}
logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
if l, ok := data[logrusErrKey]; ok {
data["fields."+logrusErrKey] = l
delete(data, logrusErrKey)
}
// If reportCaller is not set, 'func' will not conflict.
if reportCaller {
funcKey := fieldMap.resolve(FieldKeyFunc)
if l, ok := data[funcKey]; ok {
data["fields."+funcKey] = l
}
fileKey := fieldMap.resolve(FieldKeyFile)
if l, ok := data[fileKey]; ok {
data["fields."+fileKey] = l
}
}
}

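The clash prefixing above is what keeps user fields from silently overwriting the defaults; for instance, using only the behavior documented in the comment above:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// "level" collides with the default level key, so the formatter
	// emits it as "fields.level" instead of dropping either value.
	log.WithField("level", 1).Info("hello")
}
```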

@ -1,6 +1,7 @@
package logrus
import (
"bytes"
"encoding/json"
"fmt"
)
@ -10,13 +11,6 @@ type fieldKey string
// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string
// Default key names for the default fields
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
@ -33,21 +27,28 @@ type JSONFormatter struct {
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
DataKey string
// FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message",
// FieldKeyMsg: "@message",
// FieldKeyFunc: "@caller",
// },
// }
FieldMap FieldMap
// PrettyPrint will indent all json logs
PrettyPrint bool
}
// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
data := make(Fields, len(entry.Data)+4)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
@ -58,22 +59,47 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data[k] = v
}
}
prefixFieldClashes(data)
if f.DataKey != "" {
newData := make(Fields, 4)
newData[f.DataKey] = data
data = newData
}
prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = defaultTimestampFormat
}
if entry.err != "" {
data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
}
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
if entry.HasCaller() {
data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
}
serialized, err := json.Marshal(data)
if err != nil {
var b *bytes.Buffer
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
encoder := json.NewEncoder(b)
if f.PrettyPrint {
encoder.SetIndent("", " ")
}
if err := encoder.Encode(data); err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
return b.Bytes(), nil
}

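Exercising the two new JSONFormatter options from this file, DataKey and PrettyPrint:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{
		DataKey:     "fields", // nest user fields under a single key
		PrettyPrint: true,     // indent the encoded JSON
	})
	log.WithField("animal", "walrus").Info("a group of walrus emerges")
}
```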

@ -5,12 +5,13 @@ import (
"os"
"sync"
"sync/atomic"
"time"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventorous, such as logging to Kafka.
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
@ -23,6 +24,10 @@ type Logger struct {
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// Flag for whether to log caller info (off by default)
ReportCaller bool
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged.
@ -31,8 +36,12 @@ type Logger struct {
mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
// Function to exit the application, defaults to `os.Exit()`
ExitFunc exitFunc
}
type exitFunc func(int)
type MutexWrap struct {
lock sync.Mutex
disabled bool
@ -68,10 +77,12 @@ func (mw *MutexWrap) Disable() {
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(LevelHooks),
Level: InfoLevel,
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(LevelHooks),
Level: InfoLevel,
ExitFunc: os.Exit,
ReportCaller: false,
}
}
@ -84,11 +95,12 @@ func (logger *Logger) newEntry() *Entry {
}
func (logger *Logger) releaseEntry(entry *Entry) {
entry.Data = map[string]interface{}{}
logger.entryPool.Put(entry)
}
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry()
@ -112,20 +124,31 @@ func (logger *Logger) WithError(err error) *Entry {
return entry.WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.level() >= DebugLevel {
// Overrides the time of the log entry.
func (logger *Logger) WithTime(t time.Time) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithTime(t)
}
func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
if logger.IsLevelEnabled(level) {
entry := logger.newEntry()
entry.Debugf(format, args...)
entry.Logf(level, format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Tracef(format string, args ...interface{}) {
logger.Logf(TraceLevel, format, args...)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
logger.Logf(DebugLevel, format, args...)
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
}
logger.Logf(InfoLevel, format, args...)
}
func (logger *Logger) Printf(format string, args ...interface{}) {
@ -135,60 +158,44 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
logger.Logf(WarnLevel, format, args...)
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
logger.Warnf(format, args...)
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
}
logger.Logf(ErrorLevel, format, args...)
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
}
Exit(1)
logger.Logf(FatalLevel, format, args...)
logger.Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.level() >= PanicLevel {
logger.Logf(PanicLevel, format, args...)
}
func (logger *Logger) Log(level Level, args ...interface{}) {
if logger.IsLevelEnabled(level) {
entry := logger.newEntry()
entry.Panicf(format, args...)
entry.Log(level, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Trace(args ...interface{}) {
logger.Log(TraceLevel, args...)
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
}
logger.Log(DebugLevel, args...)
}
func (logger *Logger) Info(args ...interface{}) {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
logger.Log(InfoLevel, args...)
}
func (logger *Logger) Print(args ...interface{}) {
@ -198,60 +205,44 @@ func (logger *Logger) Print(args ...interface{}) {
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
logger.Log(WarnLevel, args...)
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
logger.Warn(args...)
}
func (logger *Logger) Error(args ...interface{}) {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
}
logger.Log(ErrorLevel, args...)
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
}
Exit(1)
logger.Log(FatalLevel, args...)
logger.Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.level() >= PanicLevel {
logger.Log(PanicLevel, args...)
}
func (logger *Logger) Logln(level Level, args ...interface{}) {
if logger.IsLevelEnabled(level) {
entry := logger.newEntry()
entry.Panic(args...)
entry.Logln(level, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Traceln(args ...interface{}) {
logger.Logln(TraceLevel, args...)
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
}
logger.Logln(DebugLevel, args...)
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
}
logger.Logln(InfoLevel, args...)
}
func (logger *Logger) Println(args ...interface{}) {
@ -261,44 +252,32 @@ func (logger *Logger) Println(args ...interface{}) {
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
logger.Logln(WarnLevel, args...)
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
logger.Warn(args...)
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
}
logger.Logln(ErrorLevel, args...)
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
}
Exit(1)
logger.Logln(FatalLevel, args...)
logger.Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
logger.Logln(PanicLevel, args...)
}
func (logger *Logger) Exit(code int) {
runHandlers()
if logger.ExitFunc == nil {
logger.ExitFunc = os.Exit
}
logger.ExitFunc(code)
}
// When a file is opened in append mode, it's safe to
@ -312,12 +291,53 @@ func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
// SetLevel sets the logger level.
func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
// GetLevel returns the logger level.
func (logger *Logger) GetLevel() Level {
return logger.level()
}
// AddHook adds a hook to the logger hooks.
func (logger *Logger) AddHook(hook Hook) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Hooks.Add(hook)
}
// IsLevelEnabled checks if the log level of the logger is greater than the level param
func (logger *Logger) IsLevelEnabled(level Level) bool {
return logger.level() >= level
}
// SetFormatter sets the logger formatter.
func (logger *Logger) SetFormatter(formatter Formatter) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Formatter = formatter
}
// SetOutput sets the logger output.
func (logger *Logger) SetOutput(output io.Writer) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Out = output
}
func (logger *Logger) SetReportCaller(reportCaller bool) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.ReportCaller = reportCaller
}
// ReplaceHooks replaces the logger hooks and returns the old ones
func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
logger.mu.Lock()
oldHooks := logger.Hooks
logger.Hooks = hooks
logger.mu.Unlock()
return oldHooks
}

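The new ExitFunc field makes Fatal testable: Logger.Exit falls back to os.Exit only when no replacement is set. A minimal sketch:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Swap out os.Exit so Fatal can be exercised without killing
	// the process (useful in tests).
	logger.ExitFunc = func(code int) {
		fmt.Println("would have exited with code", code)
	}
	logger.Fatal("the ice breaks!")
}
```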

@ -14,22 +14,11 @@ type Level uint32
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
if b, err := level.MarshalText(); err == nil {
return string(b)
} else {
return "unknown"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
@ -47,12 +36,47 @@ func ParseLevel(lvl string) (Level, error) {
return InfoLevel, nil
case "debug":
return DebugLevel, nil
case "trace":
return TraceLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (level *Level) UnmarshalText(text []byte) error {
l, err := ParseLevel(string(text))
if err != nil {
return err
}
*level = Level(l)
return nil
}
func (level Level) MarshalText() ([]byte, error) {
switch level {
case TraceLevel:
return []byte("trace"), nil
case DebugLevel:
return []byte("debug"), nil
case InfoLevel:
return []byte("info"), nil
case WarnLevel:
return []byte("warning"), nil
case ErrorLevel:
return []byte("error"), nil
case FatalLevel:
return []byte("fatal"), nil
case PanicLevel:
return []byte("panic"), nil
}
return nil, fmt.Errorf("not a valid logrus level %q", level)
}
// AllLevels exposes all logging levels
var AllLevels = []Level{
PanicLevel,
@ -61,6 +85,7 @@ var AllLevels = []Level{
WarnLevel,
InfoLevel,
DebugLevel,
TraceLevel,
}
// These are the different logging levels. You can set the logging level to log
@ -69,7 +94,7 @@ const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
@ -82,6 +107,8 @@ const (
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
// TraceLevel level. Designates finer-grained informational events than Debug.
TraceLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
@ -140,4 +167,20 @@ type FieldLogger interface {
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
// IsDebugEnabled() bool
// IsInfoEnabled() bool
// IsWarnEnabled() bool
// IsErrorEnabled() bool
// IsFatalEnabled() bool
// IsPanicEnabled() bool
}
// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
// here for consistency. Do not use. Use Logger or Entry instead.
type Ext1FieldLogger interface {
FieldLogger
Tracef(format string, args ...interface{})
Trace(args ...interface{})
Traceln(args ...interface{})
}

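With ParseLevel now accepting "trace" and Level implementing encoding.TextMarshaler and TextUnmarshaler, levels round-trip cleanly through configuration text; for example:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Parse a level from configuration text...
	lvl, err := log.ParseLevel("trace")
	if err != nil {
		panic(err)
	}
	log.SetLevel(lvl)

	// ...and round-trip another one through UnmarshalText.
	var l log.Level
	if err := l.UnmarshalText([]byte("warning")); err != nil {
		panic(err)
	}
	fmt.Println(lvl, l) // trace warning
}
```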

@ -1,10 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TIOCGETA
type Termios unix.Termios


@ -0,0 +1,9 @@
// +build !appengine,!js,!windows,aix
package logrus
import "io"
func checkIfTerminal(w io.Writer) bool {
return false
}


@ -0,0 +1,11 @@
// +build appengine
package logrus
import (
"io"
)
func checkIfTerminal(w io.Writer) bool {
return true
}

vendor/github.com/sirupsen/logrus/terminal_check_js.go generated vendored Normal file

@ -0,0 +1,11 @@
// +build js
package logrus
import (
"io"
)
func checkIfTerminal(w io.Writer) bool {
return false
}


@ -0,0 +1,19 @@
// +build !appengine,!js,!windows,!aix
package logrus
import (
"io"
"os"
"golang.org/x/crypto/ssh/terminal"
)
func checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
return terminal.IsTerminal(int(v.Fd()))
default:
return false
}
}


@ -0,0 +1,20 @@
// +build !appengine,!js,windows
package logrus
import (
"io"
"os"
"syscall"
)
func checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
var mode uint32
err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
return err == nil
default:
return false
}
}


@ -0,0 +1,8 @@
// +build !windows
package logrus
import "io"
func initTerminal(w io.Writer) {
}

vendor/github.com/sirupsen/logrus/terminal_windows.go generated vendored Normal file

@ -0,0 +1,18 @@
// +build !appengine,!js,windows
package logrus
import (
"io"
"os"
"syscall"
sequences "github.com/konsorten/go-windows-terminal-sequences"
)
func initTerminal(w io.Writer) {
switch v := w.(type) {
case *os.File:
sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
}
}


@ -3,14 +3,12 @@ package logrus
import (
"bytes"
"fmt"
"io"
"os"
"runtime"
"sort"
"strings"
"sync"
"time"
"golang.org/x/crypto/ssh/terminal"
)
const (
@ -24,6 +22,7 @@ const (
var (
baseTimestamp time.Time
emptyFieldMap FieldMap
)
func init() {
@ -38,6 +37,9 @@ type TextFormatter struct {
// Force disabling colors.
DisableColors bool
// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
EnvironmentOverrideColors bool
// Disable timestamp logging. Useful when output is redirected to a logging
// system that already adds timestamps.
DisableTimestamp bool
@ -54,69 +56,135 @@ type TextFormatter struct {
// be desired.
DisableSorting bool
// The keys sorting function, when uninitialized it uses sort.Strings.
SortingFunc func([]string)
// Disables the truncation of the level text to 4 characters.
DisableLevelTruncation bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// Whether the logger's out is to a terminal
isTerminal bool
sync.Once
// FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &TextFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message"}}
FieldMap FieldMap
terminalInitOnce sync.Once
}
func (f *TextFormatter) init(entry *Entry) {
if entry.Logger != nil {
f.isTerminal = f.checkIfTerminal(entry.Logger.Out)
f.isTerminal = checkIfTerminal(entry.Logger.Out)
if f.isTerminal {
initTerminal(entry.Logger.Out)
}
}
}
func (f *TextFormatter) checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
return terminal.IsTerminal(int(v.Fd()))
default:
return false
func (f *TextFormatter) isColored() bool {
isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
if f.EnvironmentOverrideColors {
if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
isColored = true
} else if ok && force == "0" {
isColored = false
} else if os.Getenv("CLICOLOR") == "0" {
isColored = false
}
}
return isColored && !f.DisableColors
}
// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
data := make(Fields)
for k, v := range entry.Data {
data[k] = v
}
prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
keys := make([]string, 0, len(data))
for k := range data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
fixedKeys := make([]string, 0, 4+len(data))
if !f.DisableTimestamp {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
}
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
if entry.Message != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
}
if entry.err != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
}
if entry.HasCaller() {
fixedKeys = append(fixedKeys,
f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
}
if !f.DisableSorting {
if f.SortingFunc == nil {
sort.Strings(keys)
fixedKeys = append(fixedKeys, keys...)
} else {
if !f.isColored() {
fixedKeys = append(fixedKeys, keys...)
f.SortingFunc(fixedKeys)
} else {
f.SortingFunc(keys)
}
}
} else {
fixedKeys = append(fixedKeys, keys...)
}
var b *bytes.Buffer
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
prefixFieldClashes(entry.Data)
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
f.terminalInitOnce.Do(func() { f.init(entry) })
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = defaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
if f.isColored() {
f.printColored(b, entry, keys, data, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
for _, key := range fixedKeys {
var value interface{}
switch {
case key == f.FieldMap.resolve(FieldKeyTime):
value = entry.Time.Format(timestampFormat)
case key == f.FieldMap.resolve(FieldKeyLevel):
value = entry.Level.String()
case key == f.FieldMap.resolve(FieldKeyMsg):
value = entry.Message
case key == f.FieldMap.resolve(FieldKeyLogrusError):
value = entry.err
case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
value = entry.Caller.Function
case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
default:
value = data[key]
}
f.appendKeyValue(b, key, value)
}
}
@ -124,10 +192,10 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
case DebugLevel, TraceLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
@ -137,17 +205,31 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
levelText := strings.ToUpper(entry.Level.String())
if !f.DisableLevelTruncation {
levelText = levelText[0:4]
}
// Remove a single newline if it already exists in the message to keep
// the behavior of logrus text_formatter the same as the stdlib log package
entry.Message = strings.TrimSuffix(entry.Message, "\n")
caller := ""
if entry.HasCaller() {
caller = fmt.Sprintf("%s:%d %s()",
entry.Caller.File, entry.Caller.Line, entry.Caller.Function)
}
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
v := data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}

View File

@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
var printFunc func(args ...interface{})
switch level {
case TraceLevel:
printFunc = entry.Trace
case DebugLevel:
printFunc = entry.Debug
case InfoLevel:

View File

@ -60,13 +60,74 @@ type Capabilities interface {
Apply(kind CapType) error
}
// NewPid create new initialized Capabilities object for given pid when it
// is nonzero, or for the current pid if pid is 0
// NewPid initializes a new Capabilities object for given pid when
// it is nonzero, or for the current process if pid is 0.
//
// Deprecated: Replace with NewPid2. For example, replace:
//
// c, err := NewPid(0)
// if err != nil {
// return err
// }
//
// with:
//
// c, err := NewPid2(0)
// if err != nil {
// return err
// }
// err = c.Load()
// if err != nil {
// return err
// }
func NewPid(pid int) (Capabilities, error) {
c, err := newPid(pid)
if err != nil {
return c, err
}
err = c.Load()
return c, err
}
// NewPid2 initializes a new Capabilities object for given pid when
// it is nonzero, or for the current process if pid is 0. This
// does not load the process's current capabilities; to do that you
// must call Load explicitly.
func NewPid2(pid int) (Capabilities, error) {
return newPid(pid)
}
// NewFile create new initialized Capabilities object for given named file.
func NewFile(name string) (Capabilities, error) {
return newFile(name)
// NewFile initializes a new Capabilities object for given file path.
//
// Deprecated: Replace with NewFile2. For example, replace:
//
// c, err := NewFile(path)
// if err != nil {
// return err
// }
//
// with:
//
// c, err := NewFile2(path)
// if err != nil {
// return err
// }
// err = c.Load()
// if err != nil {
// return err
// }
func NewFile(path string) (Capabilities, error) {
c, err := newFile(path)
if err != nil {
return c, err
}
err = c.Load()
return c, err
}
// NewFile2 creates a new initialized Capabilities object for given
// file path. This does not load the file's current capabilities;
// to do that you must call Load explicitly.
func NewFile2(path string) (Capabilities, error) {
return newFile(path)
}
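// Illustrative usage of the two-step constructors (a sketch, not part of the
// upstream file; Get, EFFECTIVE and CAP_SYS_ADMIN are existing identifiers in
// this package):
//
//	c, err := NewPid2(0) // 0 means the current process
//	if err != nil {
//		return err
//	}
//	if err := c.Load(); err != nil {
//		return err
//	}
//	if c.Get(EFFECTIVE, CAP_SYS_ADMIN) {
//		// the effective set includes CAP_SYS_ADMIN
//	}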

View File

@ -103,21 +103,17 @@ func newPid(pid int) (c Capabilities, err error) {
case linuxCapVer1:
p := new(capsV1)
p.hdr.version = capVers
p.hdr.pid = pid
p.hdr.pid = int32(pid)
c = p
case linuxCapVer2, linuxCapVer3:
p := new(capsV3)
p.hdr.version = capVers
p.hdr.pid = pid
p.hdr.pid = int32(pid)
c = p
default:
err = errUnknownVers
return
}
err = c.Load()
if err != nil {
c = nil
}
return
}
@ -428,11 +424,11 @@ func (c *capsV3) Load() (err error) {
}
if strings.HasPrefix(line, "CapB") {
fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0])
break
continue
}
if strings.HasPrefix(line, "CapA") {
fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0])
break
continue
}
}
f.Close()
@ -492,10 +488,6 @@ func (c *capsV3) Apply(kind CapType) (err error) {
func newFile(path string) (c Capabilities, err error) {
c = &capsFile{path: path}
err = c.Load()
if err != nil {
c = nil
}
return
}

View File

@ -13,7 +13,7 @@ import (
type capHeader struct {
version uint32
pid int
pid int32
}
type capData struct {

117
vendor/github.com/tchap/go-patricia/README.md generated vendored Normal file
View File

@ -0,0 +1,117 @@
# go-patricia #
**Documentation**: [GoDoc](http://godoc.org/github.com/tchap/go-patricia/patricia)<br />
**Test Coverage**: [![Coverage Status](https://coveralls.io/repos/tchap/go-patricia/badge.png)](https://coveralls.io/r/tchap/go-patricia)
## About ##
A generic patricia trie (also called radix tree) implemented in Go (Golang).
The patricia trie as implemented in this library enables fast visiting of items
in some particular ways:
1. visit all items saved in the tree,
2. visit all items matching a particular prefix (visit subtree), or
3. given a string, visit all items matching some prefix of that string.
`[]byte` type is used for keys, `interface{}` for values.
`Trie` is not thread safe. Synchronize the access yourself.
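If you need to share a `Trie` between goroutines, one straightforward option is to guard it with a mutex (a minimal sketch, not part of the library; `SafeTrie` is a hypothetical name):
```go
import (
	"sync"

	"github.com/tchap/go-patricia/patricia"
)

// SafeTrie serializes access to an underlying Trie.
type SafeTrie struct {
	mu   sync.RWMutex
	trie *patricia.Trie
}

func NewSafeTrie() *SafeTrie {
	return &SafeTrie{trie: patricia.NewTrie()}
}

func (s *SafeTrie) Insert(key patricia.Prefix, item patricia.Item) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.trie.Insert(key, item)
}

func (s *SafeTrie) Get(key patricia.Prefix) patricia.Item {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.trie.Get(key)
}
```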
### State of the Project ###
Apparently some people are using this, so the API should not change often.
Any ideas on how to make the library better are still welcome.
More (unit) testing would be cool as well...
## Usage ##
Import the package from GitHub first.
```go
import "github.com/tchap/go-patricia/patricia"
```
You can also use the gopkg.in thingie:
```go
import "gopkg.in/tchap/go-patricia.v2/patricia"
```
Then you can start having fun.
```go
printItem := func(prefix patricia.Prefix, item patricia.Item) error {
fmt.Printf("%q: %v\n", prefix, item)
return nil
}
// Create a new default trie (using the default parameter values).
trie := NewTrie()
// Create a new custom trie.
trie := NewTrie(MaxPrefixPerNode(16), MaxChildrenPerSparseNode(10))
// Insert some items.
trie.Insert(Prefix("Pepa Novak"), 1)
trie.Insert(Prefix("Pepa Sindelar"), 2)
trie.Insert(Prefix("Karel Macha"), 3)
trie.Insert(Prefix("Karel Hynek Macha"), 4)
// Just check if some things are present in the tree.
key := Prefix("Pepa Novak")
fmt.Printf("%q present? %v\n", key, trie.Match(key))
// "Pepa Novak" present? true
key = Prefix("Karel")
fmt.Printf("Anybody called %q here? %v\n", key, trie.MatchSubtree(key))
// Anybody called "Karel" here? true
// Walk the tree in alphabetical order.
trie.Visit(printItem)
// "Karel Hynek Macha": 4
// "Karel Macha": 3
// "Pepa Novak": 1
// "Pepa Sindelar": 2
// Walk a subtree.
trie.VisitSubtree(Prefix("Pepa"), printItem)
// "Pepa Novak": 1
// "Pepa Sindelar": 2
// Modify an item, then fetch it from the tree.
trie.Set(Prefix("Karel Hynek Macha"), 10)
key = Prefix("Karel Hynek Macha")
fmt.Printf("%q: %v\n", key, trie.Get(key))
// "Karel Hynek Macha": 10
// Walk prefixes.
prefix := Prefix("Karel Hynek Macha je kouzelnik")
trie.VisitPrefixes(prefix, printItem)
// "Karel Hynek Macha": 10
// Delete some items.
trie.Delete(Prefix("Pepa Novak"))
trie.Delete(Prefix("Karel Macha"))
// Walk again.
trie.Visit(printItem)
// "Karel Hynek Macha": 10
// "Pepa Sindelar": 2
// Delete a subtree.
trie.DeleteSubtree(Prefix("Pepa"))
// Print what is left.
trie.Visit(printItem)
// "Karel Hynek Macha": 10
```
## License ##
MIT, check the `LICENSE` file.
[![Gittip Badge](http://img.shields.io/gittip/alanhamlett.png)](https://www.gittip.com/tchap/ "Gittip Badge")

View File

@ -1,6 +1,39 @@
# gojsonpointer
An implementation of JSON Pointer for the Go language
## Usage
jsonText := `{
"name": "Bobby B",
"occupation": {
"title" : "King",
"years" : 15,
"heir" : "Joffrey B"
}
}`
var jsonDocument map[string]interface{}
json.Unmarshal([]byte(jsonText), &jsonDocument)
//create a JSON pointer
pointerString := "/occupation/title"
pointer, _ := NewJsonPointer(pointerString)
//SET a new value for the "title" in the document
pointer.Set(jsonDocument, "Supreme Leader of Westeros")
//GET the new "title" from the document
title, _, _ := pointer.Get(jsonDocument)
fmt.Println(title) //outputs "Supreme Leader of Westeros"
//DELETE the "heir" from the document
deletePointer, _ := NewJsonPointer("/occupation/heir")
deletePointer.Delete(jsonDocument)
b, _ := json.Marshal(jsonDocument)
fmt.Println(string(b))
//outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}`
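For reference, the same example as a self-contained program (a sketch; only `package main`, imports and error handling are added):
```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/xeipuuv/gojsonpointer"
)

func main() {
	jsonText := `{
		"name": "Bobby B",
		"occupation": {
			"title": "King",
			"years": 15,
			"heir": "Joffrey B"
		}
	}`

	var jsonDocument map[string]interface{}
	if err := json.Unmarshal([]byte(jsonText), &jsonDocument); err != nil {
		panic(err)
	}

	// SET a new value for the "title" in the document.
	pointer, err := gojsonpointer.NewJsonPointer("/occupation/title")
	if err != nil {
		panic(err)
	}
	pointer.Set(jsonDocument, "Supreme Leader of Westeros")

	// GET the new "title" from the document.
	title, _, _ := pointer.Get(jsonDocument)
	fmt.Println(title)

	// DELETE the "heir" from the document.
	deletePointer, err := gojsonpointer.NewJsonPointer("/occupation/heir")
	if err != nil {
		panic(err)
	}
	deletePointer.Delete(jsonDocument)

	b, _ := json.Marshal(jsonDocument)
	fmt.Println(string(b))
}
```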
## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

View File

@ -90,6 +90,13 @@ func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{},
}
// Uses the pointer to delete a value from a JSON document
func (p *JsonPointer) Delete(document interface{}) (interface{}, error) {
is := &implStruct{mode: "DEL", inDocument: document}
p.implementation(is)
return document, is.outError
}
// Both Get and Set functions use the same implementation to avoid code duplication
func (p *JsonPointer) implementation(i *implStruct) {
@ -106,9 +113,14 @@ func (p *JsonPointer) implementation(i *implStruct) {
node := i.inDocument
previousNodes := make([]interface{}, len(p.referenceTokens))
previousTokens := make([]string, len(p.referenceTokens))
for ti, token := range p.referenceTokens {
isLastToken := ti == len(p.referenceTokens)-1
previousNodes[ti] = node
previousTokens[ti] = token
switch v := node.(type) {
@ -118,7 +130,11 @@ func (p *JsonPointer) implementation(i *implStruct) {
node = v[decodedToken]
if isLastToken && i.mode == "SET" {
v[decodedToken] = i.setInValue
} else if isLastToken && i.mode == "DEL" {
delete(v, decodedToken)
}
} else if (isLastToken && i.mode == "SET") {
v[decodedToken] = i.setInValue
} else {
i.outError = fmt.Errorf("Object has no key '%s'", decodedToken)
i.getOutKind = reflect.Map
@ -144,6 +160,11 @@ func (p *JsonPointer) implementation(i *implStruct) {
node = v[tokenIndex]
if isLastToken && i.mode == "SET" {
v[tokenIndex] = i.setInValue
} else if isLastToken && i.mode == "DEL" {
v[tokenIndex] = v[len(v)-1]
v[len(v)-1] = nil
v = v[:len(v)-1]
previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v
}
default:

View File

@ -27,11 +27,12 @@ package gojsonreference
import (
"errors"
"github.com/xeipuuv/gojsonpointer"
"net/url"
"path/filepath"
"runtime"
"strings"
"github.com/xeipuuv/gojsonpointer"
)
const (
@ -124,16 +125,21 @@ func (r *JsonReference) parse(jsonReferenceString string) (err error) {
// Creates a new reference from a parent and a child
// If the child cannot inherit from the parent, an error is returned
func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
childUrl := child.GetUrl()
parentUrl := r.GetUrl()
if childUrl == nil {
if child.GetUrl() == nil {
return nil, errors.New("childUrl is nil!")
}
if parentUrl == nil {
if r.GetUrl() == nil {
return nil, errors.New("parentUrl is nil!")
}
ref, err := NewJsonReference(parentUrl.ResolveReference(childUrl).String())
// Get a copy of the parent url to make sure we do not modify the original.
// URL reference resolving fails if the fragment of the child is empty, but the parent's is not.
// The fragment of the child must be used, so the fragment of the parent is manually removed.
parentUrl := *r.GetUrl()
parentUrl.Fragment = ""
ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String())
if err != nil {
return nil, err
}

View File

@ -1,10 +1,11 @@
[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema)
[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema)
# gojsonschema
## Description
An implementation of JSON Schema, based on IETF's draft v4 - Go language
An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07.
References:
@ -54,7 +55,6 @@ func main() {
fmt.Printf("- %s\n", desc)
}
}
}
@ -148,6 +148,87 @@ To check the result :
}
```
## Loading local schemas
By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
```go
sl := gojsonschema.NewSchemaLoader()
loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`)
err := sl.AddSchema("http://some_host.com/string.json", loader1)
```
Alternatively if your schema already has an `$id` you can use the `AddSchemas` function
```go
loader2 := gojsonschema.NewStringLoader(`{
"$id" : "http://some_host.com/maxlength.json",
"maxLength" : 5
}`)
err = sl.AddSchemas(loader2)
```
The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them.
```go
loader3 := gojsonschema.NewStringLoader(`{
"$id" : "http://some_host.com/main.json",
"allOf" : [
{ "$ref" : "http://some_host.com/string.json" },
{ "$ref" : "http://some_host.com/maxlength.json" }
]
}`)
schema, err := sl.Compile(loader3)
documentLoader := gojsonschema.NewStringLoader(`"hello world"`)
result, err := schema.Validate(documentLoader)
```
It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema.
```go
err = sl.AddSchemas(loader3)
schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json"))
```
Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used.
## Using a specific draft
By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode.
Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property.
```go
sl := gojsonschema.NewSchemaLoader()
sl.Draft = gojsonschema.Draft7
sl.AutoDetect = false
```
If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice-versa, as long as `$schema` is specified in all schemas.
## Meta-schema validation
Schemas that are added using `AddSchema`, `AddSchemas` and `Compile` can be validated against their meta-schema by setting the `Validate` property.
The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step.
```go
sl := gojsonschema.NewSchemaLoader()
sl.Validate = true
err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
$id" : "http://some_host.com/invalid.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"multipleOf" : true
}`))
```
Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used.
## Working with Errors
The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it
@ -155,7 +236,9 @@ The library handles string error codes which you can customize by creating your
gojsonschema.Locale = YourCustomLocale{}
```
However, each error contains additional contextual information.
Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens.
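For example, a typical way to consume these errors (a sketch; `schemaLoader` and `documentLoader` are assumed to be set up as in the earlier examples):
```go
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
	panic(err.Error())
}

for _, e := range result.Errors() {
	// Field(), Description() and Type() are documented below.
	fmt.Printf("%s: %s (type: %s)\n", e.Field(), e.Description(), e.Type())
}
```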
**err.Type()**: *string* Returns the "type" of error that occurred. Note you can also type check. See below
@ -169,15 +252,18 @@ Note: An error of RequiredType has an err.Type() return value of "required"
"number_not": NumberNotError
"missing_dependency": MissingDependencyError
"internal": InternalError
"const": ConstEror
"enum": EnumError
"array_no_additional_items": ArrayNoAdditionalItemsError
"array_min_items": ArrayMinItemsError
"array_max_items": ArrayMaxItemsError
"unique": ItemsMustBeUniqueError
"contains" : ArrayContainsError
"array_min_properties": ArrayMinPropertiesError
"array_max_properties": ArrayMaxPropertiesError
"additional_property_not_allowed": AdditionalPropertyNotAllowedError
"invalid_property_pattern": InvalidPropertyPatternError
"invalid_property_name": InvalidPropertyNameError
"string_gte": StringLengthGTEError
"string_lte": StringLengthLTEError
"pattern": DoesNotMatchPatternError
@ -186,15 +272,19 @@ Note: An error of RequiredType has an err.Type() return value of "required"
"number_gt": NumberGTError
"number_lte": NumberLTEError
"number_lt": NumberLTError
"condition_then" : ConditionThenError
"condition_else" : ConditionElseError
**err.Value()**: *interface{}* Returns the value given
**err.Context()**: *gojsonschema.jsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result.
**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e.
@ -225,10 +315,35 @@ Learn more about what types of template functions you can use in `ErrorTemplateF
## Formats
JSON Schema allows for an optional "format" property to validate instances against well-known formats. gojsonschema ships with all of the formats defined in the spec that you can use like this:
````json
{"type": "string", "format": "email"}
````
Available formats: date-time, hostname, email, ipv4, ipv6, uri, uri-reference.
Not all formats defined in draft-07 are available. Implemented formats are:
* `date`
* `time`
* `date-time`
* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames.
* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support.
* `idn-email`. Same caveat as `email`.
* `ipv4`
* `ipv6`
* `uri`. Includes unicode support.
* `uri-reference`. Includes unicode support.
* `iri`
* `iri-reference`
* `uri-template`
* `uuid`
* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible.
* `json-pointer`
* `relative-json-pointer`
`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific
unicode enabled formats for the sake of interoperability as other implementations might not support unicode in the regular formats.
The validation code for `uri`, `idn-email` and their relatives uses mostly standard library code. Go 1.5 and 1.6 contain some minor bugs with handling URIs and unicode. You are encouraged to use Go 1.7+ if you rely on these formats.
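For instance, to validate a document against one of these formats (an illustrative sketch using the string loaders shown earlier in this README):
```go
schemaLoader := gojsonschema.NewStringLoader(`{"type": "string", "format": "email"}`)
documentLoader := gojsonschema.NewStringLoader(`"bobby.b@example.com"`)

result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
	panic(err.Error())
}
fmt.Println(result.Valid()) // true for a well-formed address
```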
For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
@ -285,7 +400,63 @@ func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
````
Formats can also be removed, for example if you want to override one of the formats that is defined by default.
```go
gojsonschema.FormatCheckers.Remove("hostname")
```
## Additional custom validation
After the validation has run and you have the results, you may add additional
errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead
of having to add special exceptions for your own errors. Below is an example.
```go
type AnswerInvalidError struct {
gojsonschema.ResultErrorFields
}
func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
err := AnswerInvalidError{}
err.SetContext(context)
err.SetType("custom_invalid_error")
// It is important to use SetDescriptionFormat(), as it is parsed and then used to call SetDescription();
// any description set directly on err will be overridden by this.
err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
err.SetValue(value)
err.SetDetails(details)
return &err
}
func main() {
// ...
schema, err := gojsonschema.NewSchema(schemaLoader)
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if true { // some validation
jsonContext := gojsonschema.NewJsonContext("question", nil)
errDetail := gojsonschema.ErrorDetails{
"answer": 42,
}
result.AddError(
newAnswerInvalidError(
gojsonschema.NewJsonContext("answer", jsonContext),
52,
errDetail,
),
errDetail,
)
}
return result, err
}
```
This is especially useful if you want to add validation beyond what the
json schema drafts can provide, such as business-specific logic.
## Uses

118
vendor/github.com/xeipuuv/gojsonschema/draft.go generated vendored Normal file
View File

@ -0,0 +1,118 @@
// Copyright 2018 johandorland ( https://github.com/johandorland )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gojsonschema
import (
"errors"
"math"
"reflect"
"github.com/xeipuuv/gojsonreference"
)
type Draft int
const (
Draft4 Draft = 4
Draft6 Draft = 6
Draft7 Draft = 7
Hybrid Draft = math.MaxInt32
)
type draftConfig struct {
Version Draft
MetaSchemaURL string
MetaSchema string
}
type draftConfigs []draftConfig
var drafts draftConfigs
func init() {
drafts = []draftConfig{
draftConfig{
Version: Draft4,
MetaSchemaURL: "http://json-schema.org/draft-04/schema",
MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`,
},
draftConfig{
Version: Draft6,
MetaSchemaURL: "http://json-schema.org/draft-06/schema",
MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`,
},
draftConfig{
Version: Draft7,
MetaSchemaURL: "http://json-schema.org/draft-07/schema",
MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`,
},
}
}
func (dc draftConfigs) GetMetaSchema(url string) string {
for _, config := range dc {
if config.MetaSchemaURL == url {
return config.MetaSchema
}
}
return ""
}
func (dc draftConfigs) GetDraftVersion(url string) *Draft {
for _, config := range dc {
if config.MetaSchemaURL == url {
return &config.Version
}
}
return nil
}
func (dc draftConfigs) GetSchemaURL(draft Draft) string {
for _, config := range dc {
if config.Version == draft {
return config.MetaSchemaURL
}
}
return ""
}
func parseSchemaURL(documentNode interface{}) (string, *Draft, error) {
if isKind(documentNode, reflect.Bool) {
return "", nil, nil
}
m := documentNode.(map[string]interface{})
if existsMapKey(m, KEY_SCHEMA) {
if !isKind(m[KEY_SCHEMA], reflect.String) {
return "", nil, errors.New(formatErrorDescription(
Locale.MustBeOfType(),
ErrorDetails{
"key": KEY_SCHEMA,
"type": TYPE_STRING,
},
))
}
schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string))
if err != nil {
return "", nil, err
}
schema := schemaReference.String()
return schema, drafts.GetDraftVersion(schema), nil
}
return "", nil, nil
}

View File

@ -56,6 +56,11 @@ type (
ResultErrorFields
}
// ConstError. ErrorDetails: allowed
ConstError struct {
ResultErrorFields
}
// EnumError. ErrorDetails: allowed
EnumError struct {
ResultErrorFields
@ -76,11 +81,16 @@ type (
ResultErrorFields
}
// ItemsMustBeUniqueError. ErrorDetails: type
// ItemsMustBeUniqueError. ErrorDetails: type, i, j
ItemsMustBeUniqueError struct {
ResultErrorFields
}
// ArrayContainsError. ErrorDetails:
ArrayContainsError struct {
ResultErrorFields
}
// ArrayMinPropertiesError. ErrorDetails: min
ArrayMinPropertiesError struct {
ResultErrorFields
@ -101,6 +111,11 @@ type (
ResultErrorFields
}
// InvalidPropertyNameError. ErrorDetails: property
InvalidPropertyNameError struct {
ResultErrorFields
}
// StringLengthGTEError. ErrorDetails: min
StringLengthGTEError struct {
ResultErrorFields
@ -145,10 +160,20 @@ type (
NumberLTError struct {
ResultErrorFields
}
// ConditionThenError. ErrorDetails: -
ConditionThenError struct {
ResultErrorFields
}
// ConditionElseError. ErrorDetails: -
ConditionElseError struct {
ResultErrorFields
}
)
// newError takes a ResultError type and sets the type, context, description, details, value, and field
func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) {
func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) {
var t string
var d string
switch err.(type) {
@ -176,6 +201,9 @@ func newError(err ResultError, context *jsonContext, value interface{}, locale l
case *InternalError:
t = "internal"
d = locale.Internal()
case *ConstError:
t = "const"
d = locale.Const()
case *EnumError:
t = "enum"
d = locale.Enum()
@ -191,6 +219,9 @@ func newError(err ResultError, context *jsonContext, value interface{}, locale l
case *ItemsMustBeUniqueError:
t = "unique"
d = locale.Unique()
case *ArrayContainsError:
t = "contains"
d = locale.ArrayContains()
case *ArrayMinPropertiesError:
t = "array_min_properties"
d = locale.ArrayMinProperties()
@ -203,6 +234,9 @@ func newError(err ResultError, context *jsonContext, value interface{}, locale l
case *InvalidPropertyPatternError:
t = "invalid_property_pattern"
d = locale.InvalidPropertyPattern()
case *InvalidPropertyNameError:
t = "invalid_property_name"
d = locale.InvalidPropertyName()
case *StringLengthGTEError:
t = "string_gte"
d = locale.StringGTE()
@ -230,19 +264,26 @@ func newError(err ResultError, context *jsonContext, value interface{}, locale l
case *NumberLTError:
t = "number_lt"
d = locale.NumberLT()
case *ConditionThenError:
t = "condition_then"
d = locale.ConditionThen()
case *ConditionElseError:
t = "condition_else"
d = locale.ConditionElse()
}
err.SetType(t)
err.SetContext(context)
err.SetValue(value)
err.SetDetails(details)
err.SetDescriptionFormat(d)
details["field"] = err.Field()
if _, exists := details["context"]; !exists && context != nil {
details["context"] = context.String()
}
err.SetDescription(formatErrorDescription(d, details))
err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
}
// formatErrorDescription takes a string in the default text/template

View File

@ -2,9 +2,11 @@ package gojsonschema
import (
"net"
"net/mail"
"net/url"
"regexp"
"strings"
"sync"
"time"
)
@ -51,12 +53,19 @@ type (
// http://tools.ietf.org/html/rfc3339#section-5.6
DateTimeFormatChecker struct{}
DateFormatChecker struct{}
TimeFormatChecker struct{}
// URIFormatChecker validates a URI with a valid Scheme per RFC3986
URIFormatChecker struct{}
// URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
URIReferenceFormatChecker struct{}
// URITemplateFormatChecker validates a URI template per RFC6570
URITemplateFormatChecker struct{}
// HostnameFormatChecker validates a hostname is in the correct format
HostnameFormatChecker struct{}
@ -65,6 +74,12 @@ type (
// RegexFormatChecker validates a regex is in the correct format
RegexFormatChecker struct{}
// JSONPointerFormatChecker validates a JSON Pointer per RFC6901
JSONPointerFormatChecker struct{}
// RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format
RelativeJSONPointerFormatChecker struct{}
)
var (
@ -72,45 +87,65 @@ var (
// so library users can add custom formatters
FormatCheckers = FormatCheckerChain{
formatters: map[string]FormatChecker{
"date-time": DateTimeFormatChecker{},
"hostname": HostnameFormatChecker{},
"email": EmailFormatChecker{},
"ipv4": IPV4FormatChecker{},
"ipv6": IPV6FormatChecker{},
"uri": URIFormatChecker{},
"uri-reference": URIReferenceFormatChecker{},
"uuid": UUIDFormatChecker{},
"regex": RegexFormatChecker{},
"date": DateFormatChecker{},
"time": TimeFormatChecker{},
"date-time": DateTimeFormatChecker{},
"hostname": HostnameFormatChecker{},
"email": EmailFormatChecker{},
"idn-email": EmailFormatChecker{},
"ipv4": IPV4FormatChecker{},
"ipv6": IPV6FormatChecker{},
"uri": URIFormatChecker{},
"uri-reference": URIReferenceFormatChecker{},
"iri": URIFormatChecker{},
"iri-reference": URIReferenceFormatChecker{},
"uri-template": URITemplateFormatChecker{},
"uuid": UUIDFormatChecker{},
"regex": RegexFormatChecker{},
"json-pointer": JSONPointerFormatChecker{},
"relative-json-pointer": RelativeJSONPointerFormatChecker{},
},
}
// Regex credit: https://github.com/asaskevich/govalidator
rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$")
// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
// Use a regex to make sure curly brackets are balanced properly after validating it as a URI
rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$")
rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$")
rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$")
lock = new(sync.Mutex)
)
// Add adds a FormatChecker to the FormatCheckerChain
// The name used will be the value used for the format key in your json schema
func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
lock.Lock()
c.formatters[name] = f
lock.Unlock()
return c
}
// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
lock.Lock()
delete(c.formatters, name)
lock.Unlock()
return c
}
// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
func (c *FormatCheckerChain) Has(name string) bool {
lock.Lock()
_, ok := c.formatters[name]
lock.Unlock()
return ok
}
@ -134,7 +169,9 @@ func (f EmailFormatChecker) IsFormat(input interface{}) bool {
return false
}
return rxEmail.MatchString(asString)
_, err := mail.ParseAddress(asString)
return err == nil
}
// Credit: https://github.com/asaskevich/govalidator
@ -185,6 +222,29 @@ func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
return false
}
func (f DateFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if ok == false {
return false
}
_, err := time.Parse("2006-01-02", asString)
return err == nil
}
func (f TimeFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if ok == false {
return false
}
if _, err := time.Parse("15:04:05Z07:00", asString); err == nil {
return true
}
_, err := time.Parse("15:04:05", asString)
return err == nil
}
func (f URIFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
@ -193,11 +253,12 @@ func (f URIFormatChecker) IsFormat(input interface{}) bool {
}
u, err := url.Parse(asString)
if err != nil || u.Scheme == "" {
return false
}
return true
return !strings.Contains(asString, `\`)
}
func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
@ -208,7 +269,21 @@ func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
}
_, err := url.Parse(asString)
return err == nil
return err == nil && !strings.Contains(asString, `\`)
}
func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if ok == false {
return false
}
u, err := url.Parse(asString)
if err != nil || strings.Contains(asString, `\`) {
return false
}
return rxURITemplate.MatchString(u.Path)
}
func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
@ -248,3 +323,21 @@ func (f RegexFormatChecker) IsFormat(input interface{}) bool {
}
return true
}
func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if ok == false {
return false
}
return rxJSONPointer.MatchString(asString)
}
func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if ok == false {
return false
}
return rxRelJSONPointer.MatchString(asString)
}

View File

@ -26,20 +26,20 @@ package gojsonschema
import "bytes"
// jsonContext implements a persistent linked-list of strings
type jsonContext struct {
// JsonContext implements a persistent linked-list of strings
type JsonContext struct {
head string
tail *jsonContext
tail *JsonContext
}
func newJsonContext(head string, tail *jsonContext) *jsonContext {
return &jsonContext{head, tail}
func NewJsonContext(head string, tail *JsonContext) *JsonContext {
return &JsonContext{head, tail}
}
// String displays the context in reverse.
// This plays well with the data structure's persistent nature with
// Cons and a json document's tree structure.
func (c *jsonContext) String(del ...string) string {
func (c *JsonContext) String(del ...string) string {
byteArr := make([]byte, 0, c.stringLen())
buf := bytes.NewBuffer(byteArr)
c.writeStringToBuffer(buf, del)
@ -47,7 +47,7 @@ func (c *jsonContext) String(del ...string) string {
return buf.String()
}
func (c *jsonContext) stringLen() int {
func (c *JsonContext) stringLen() int {
length := 0
if c.tail != nil {
length = c.tail.stringLen() + 1 // add 1 for "."
@ -57,7 +57,7 @@ func (c *jsonContext) stringLen() int {
return length
}
func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
if c.tail != nil {
c.tail.writeStringToBuffer(buf, del)

View File

@ -38,7 +38,6 @@ import (
"runtime"
"strings"
"github.com/xeipuuv/gojsonreference"
)
@ -108,7 +107,7 @@ func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory {
}
// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system.
func NewReferenceLoader(source string) *jsonReferenceLoader {
func NewReferenceLoader(source string) JSONLoader {
return &jsonReferenceLoader{
fs: osFS,
source: source,
@ -116,7 +115,7 @@ func NewReferenceLoader(source string) *jsonReferenceLoader {
}
// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system.
func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader {
func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader {
return &jsonReferenceLoader{
fs: fs,
source: source,
@ -139,13 +138,11 @@ func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
if reference.HasFileScheme {
filename := strings.Replace(refToUrl.GetUrl().Path, "file://", "", -1)
filename := strings.TrimPrefix(refToUrl.String(), "file://")
if runtime.GOOS == "windows" {
// on Windows, a file URL may have an extra leading slash, use slashes
// instead of backslashes, and have spaces escaped
if strings.HasPrefix(filename, "/") {
filename = filename[1:]
}
filename = strings.TrimPrefix(filename, "/")
filename = filepath.FromSlash(filename)
}
@ -169,6 +166,12 @@ func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
// return cached versions of the metaschemas for drafts 4, 6 and 7
// for performance and to allow easier offline use
if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" {
return decodeJsonUsingNumber(strings.NewReader(metaSchema))
}
resp, err := http.Get(address)
if err != nil {
return nil, err
@ -185,7 +188,6 @@ func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error)
}
return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))
}
func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
@ -222,7 +224,7 @@ func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
func NewStringLoader(source string) *jsonStringLoader {
func NewStringLoader(source string) JSONLoader {
return &jsonStringLoader{source: source}
}
@ -250,7 +252,7 @@ func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
func NewBytesLoader(source []byte) *jsonBytesLoader {
func NewBytesLoader(source []byte) JSONLoader {
return &jsonBytesLoader{source: source}
}
@ -277,7 +279,7 @@ func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
func NewGoLoader(source interface{}) *jsonGoLoader {
func NewGoLoader(source interface{}) JSONLoader {
return &jsonGoLoader{source: source}
}
@ -298,12 +300,12 @@ type jsonIOLoader struct {
buf *bytes.Buffer
}
func NewReaderLoader(source io.Reader) (*jsonIOLoader, io.Reader) {
func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) {
buf := &bytes.Buffer{}
return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf)
}
func NewWriterLoader(source io.Writer) (*jsonIOLoader, io.Writer) {
func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) {
buf := &bytes.Buffer{}
return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf)
}
@ -324,6 +326,30 @@ func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
// JSON raw loader
// In case the JSON is already unmarshalled to an interface{}, use this loader.
// This is used for testing, as otherwise there is no guarantee the JSON was unmarshalled
// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
type jsonRawLoader struct {
source interface{}
}
func NewRawLoader(source interface{}) *jsonRawLoader {
return &jsonRawLoader{source: source}
}
func (l *jsonRawLoader) JsonSource() interface{} {
return l.source
}
func (l *jsonRawLoader) LoadJSON() (interface{}, error) {
return l.source, nil
}
func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) {
return gojsonreference.NewJsonReference("#")
}
func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
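// Illustrative use of the raw loader (a sketch, not part of the upstream
// file; Validate and NewStringLoader are existing functions in this package):
//
//	doc := map[string]interface{}{"name": "Bobby B"}
//	schema := NewStringLoader(`{"type": "object"}`)
//	result, err := Validate(schema, NewRawLoader(doc))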
func decodeJsonUsingNumber(r io.Reader) (interface{}, error) {
var document interface{}
@ -335,7 +361,7 @@ func decodeJsonUsingNumber(r io.Reader) (interface{}, error) {
if err != nil {
return nil, err
}
return document, nil
}

View File

@ -36,16 +36,19 @@ type (
NumberNot() string
MissingDependency() string
Internal() string
Const() string
Enum() string
ArrayNotEnoughItems() string
ArrayNoAdditionalItems() string
ArrayMinItems() string
ArrayMaxItems() string
Unique() string
ArrayContains() string
ArrayMinProperties() string
ArrayMaxProperties() string
AdditionalPropertyNotAllowed() string
InvalidPropertyPattern() string
InvalidPropertyName() string
StringGTE() string
StringLTE() string
DoesNotMatchPattern() string
@ -76,6 +79,9 @@ type (
HttpBadStatus() string
ParseError() string
ConditionThen() string
ConditionElse() string
// ErrorFormat
ErrorFormat() string
}
@ -116,6 +122,10 @@ func (l DefaultLocale) Internal() string {
return `Internal Error {{.error}}`
}
func (l DefaultLocale) Const() string {
return `{{.field}} does not match: {{.allowed}}`
}
func (l DefaultLocale) Enum() string {
return `{{.field}} must be one of the following: {{.allowed}}`
}
@ -137,7 +147,11 @@ func (l DefaultLocale) ArrayMaxItems() string {
}
func (l DefaultLocale) Unique() string {
return `{{.type}} items must be unique`
return `{{.type}} items[{{.i}},{{.j}}] must be unique`
}
func (l DefaultLocale) ArrayContains() string {
return `At least one of the items must match`
}
func (l DefaultLocale) ArrayMinProperties() string {
@ -156,6 +170,10 @@ func (l DefaultLocale) InvalidPropertyPattern() string {
return `Property "{{.property}}" does not match pattern {{.pattern}}`
}
func (l DefaultLocale) InvalidPropertyName() string {
return `Property name of "{{.property}}" does not match`
}
func (l DefaultLocale) StringGTE() string {
return `String length must be greater than or equal to {{.min}}`
}
@ -268,14 +286,23 @@ func (l DefaultLocale) ErrorFormat() string {
//Parse error
func (l DefaultLocale) ParseError() string {
return `Expected: %expected%, given: Invalid JSON`
return `Expected: {{.expected}}, given: Invalid JSON`
}
//If/Else
func (l DefaultLocale) ConditionThen() string {
return `Must validate "then" as "if" was valid`
}
func (l DefaultLocale) ConditionElse() string {
return `Must validate "else" as "if" was not valid`
}
const (
STRING_NUMBER = "number"
STRING_ARRAY_OF_STRINGS = "array of strings"
STRING_ARRAY_OF_SCHEMAS = "array of schemas"
STRING_SCHEMA = "schema"
STRING_SCHEMA = "valid schema"
STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
STRING_PROPERTIES = "properties"
STRING_DEPENDENCY = "dependency"

View File

@ -40,10 +40,12 @@ type (
Field() string
SetType(string)
Type() string
SetContext(*jsonContext)
Context() *jsonContext
SetContext(*JsonContext)
Context() *JsonContext
SetDescription(string)
Description() string
SetDescriptionFormat(string)
DescriptionFormat() string
SetValue(interface{})
Value() interface{}
SetDetails(ErrorDetails)
@ -55,11 +57,12 @@ type (
// ResultErrorFields implements the ResultError interface, so custom errors
// can be defined by just embedding this type
ResultErrorFields struct {
errorType string // A string with the type of error (i.e. invalid_type)
context *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
description string // A human readable error message
value interface{} // Value given by the JSON file that is the source of the error
details ErrorDetails
errorType string // A string with the type of error (i.e. invalid_type)
context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
description string // A human readable error message
descriptionFormat string // A format for human readable error message
value interface{} // Value given by the JSON file that is the source of the error
details ErrorDetails
}
Result struct {
@ -73,12 +76,6 @@ type (
// Field outputs the field name without the root context
// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
func (v *ResultErrorFields) Field() string {
if p, ok := v.Details()["property"]; ok {
if str, isString := p.(string); isString {
return str
}
}
return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
}
@ -90,11 +87,11 @@ func (v *ResultErrorFields) Type() string {
return v.errorType
}
func (v *ResultErrorFields) SetContext(context *jsonContext) {
func (v *ResultErrorFields) SetContext(context *JsonContext) {
v.context = context
}
func (v *ResultErrorFields) Context() *jsonContext {
func (v *ResultErrorFields) Context() *JsonContext {
return v.context
}
@ -106,6 +103,14 @@ func (v *ResultErrorFields) Description() string {
return v.description
}
func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) {
v.descriptionFormat = descriptionFormat
}
func (v *ResultErrorFields) DescriptionFormat() string {
return v.descriptionFormat
}
func (v *ResultErrorFields) SetValue(value interface{}) {
v.value = value
}
@ -155,7 +160,19 @@ func (v *Result) Errors() []ResultError {
return v.errors
}
func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) {
// Add a fully filled error to the error set
// SetDescription() will be called with the result of the parsed err.DescriptionFormat()
func (v *Result) AddError(err ResultError, details ErrorDetails) {
if _, exists := details["context"]; !exists && err.Context() != nil {
details["context"] = err.Context().String()
}
err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
v.errors = append(v.errors, err)
}
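// A minimal sketch of the intended use, assuming a custom checker already
// holds a *Result and a *JsonContext (names below are illustrative only):
//
//	err := new(ResultErrorFields)
//	err.SetContext(ctx)
//	err.SetType("custom_error")
//	err.SetDescriptionFormat("{{.field}} failed a custom check")
//	result.AddError(err, ErrorDetails{"field": "price"})
//	// AddError renders the description from the format string and the
//	// details map, then appends err to the error set.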
func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) {
newError(err, context, value, Locale, details)
v.errors = append(v.errors, err)
v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function

View File

@ -27,8 +27,8 @@
package gojsonschema
import (
// "encoding/json"
"errors"
"math/big"
"reflect"
"regexp"
"text/template"
@ -46,39 +46,7 @@ var (
)
func NewSchema(l JSONLoader) (*Schema, error) {
ref, err := l.JsonReference()
if err != nil {
return nil, err
}
d := Schema{}
d.pool = newSchemaPool(l.LoaderFactory())
d.documentReference = ref
d.referencePool = newSchemaReferencePool()
var doc interface{}
if ref.String() != "" {
// Get document from schema pool
spd, err := d.pool.GetDocument(d.documentReference)
if err != nil {
return nil, err
}
doc = spd.Document
} else {
// Load JSON directly
doc, err = l.LoadJSON()
if err != nil {
return nil, err
}
d.pool.SetStandaloneDocument(doc)
}
err = d.parse(doc)
if err != nil {
return nil, err
}
return &d, nil
return NewSchemaLoader().Compile(l)
}
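// A minimal sketch of compiling and reusing a schema through this entry
// point (loader contents are illustrative):
//
//	loader := NewStringLoader(`{"type": "object", "required": ["name"]}`)
//	schema, err := NewSchema(loader)
//	if err != nil { /* handle */ }
//	result, err := schema.Validate(NewStringLoader(`{"name": "x"}`))
//	// Compiling once and calling Validate repeatedly avoids re-parsing.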
type Schema struct {
@ -88,8 +56,8 @@ type Schema struct {
referencePool *schemaReferencePool
}
func (d *Schema) parse(document interface{}) error {
d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY}
func (d *Schema) parse(document interface{}, draft Draft) error {
d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft}
return d.parseSchema(document, d.rootSchema)
}
@ -105,6 +73,23 @@ func (d *Schema) SetRootSchemaName(name string) {
//
func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error {
if currentSchema.draft == nil {
if currentSchema.parent == nil {
return errors.New("Draft not set")
}
currentSchema.draft = currentSchema.parent.draft
}
// As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}"
if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) {
b := documentNode.(bool)
if b {
documentNode = map[string]interface{}{}
} else {
documentNode = map[string]interface{}{"not": true}
}
}
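// For example, under draft 6+ the schema `{"items": true}` is parsed as
// `{"items": {}}` (accept anything), while `{"items": false}` becomes
// `{"items": {"not": {}}}` (reject everything).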
if !isKind(documentNode, reflect.Map) {
return errors.New(formatErrorDescription(
Locale.ParseError(),
@ -116,81 +101,67 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
m := documentNode.(map[string]interface{})
if currentSchema == d.rootSchema {
if currentSchema.parent == nil {
currentSchema.ref = &d.documentReference
currentSchema.id = &d.documentReference
}
// $subSchema
if existsMapKey(m, KEY_SCHEMA) {
if !isKind(m[KEY_SCHEMA], reflect.String) {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_STRING,
"given": KEY_SCHEMA,
},
))
}
schemaRef := m[KEY_SCHEMA].(string)
schemaReference, err := gojsonreference.NewJsonReference(schemaRef)
currentSchema.subSchema = &schemaReference
if err != nil {
return err
}
if currentSchema.id == nil && currentSchema.parent != nil {
currentSchema.id = currentSchema.parent.id
}
// $ref
if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) {
// In draft 6 the id keyword was renamed to $id
// Hybrid mode uses the old id by default
var keyID string
switch *currentSchema.draft {
case Draft4:
keyID = KEY_ID
case Hybrid:
keyID = KEY_ID_NEW
if existsMapKey(m, KEY_ID) {
keyID = KEY_ID
}
default:
keyID = KEY_ID_NEW
}
if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_STRING,
"given": KEY_REF,
"given": keyID,
},
))
}
if k, ok := m[KEY_REF].(string); ok {
if k, ok := m[keyID].(string); ok {
jsonReference, err := gojsonreference.NewJsonReference(k)
if err != nil {
return err
}
if jsonReference.HasFullUrl {
currentSchema.ref = &jsonReference
if currentSchema == d.rootSchema {
currentSchema.id = &jsonReference
} else {
inheritedReference, err := currentSchema.ref.Inherits(jsonReference)
ref, err := currentSchema.parent.id.Inherits(jsonReference)
if err != nil {
return err
}
currentSchema.ref = inheritedReference
}
if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok {
currentSchema.refSchema = sch
} else {
err := d.parseReference(documentNode, currentSchema, k)
if err != nil {
return err
}
return nil
currentSchema.id = ref
}
}
// definitions
if existsMapKey(m, KEY_DEFINITIONS) {
if isKind(m[KEY_DEFINITIONS], reflect.Map) {
currentSchema.definitions = make(map[string]*subSchema)
for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) {
if isKind(dv, reflect.Map) {
newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref}
currentSchema.definitions[dk] = newSchema
if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) {
for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) {
if isKind(dv, reflect.Map, reflect.Bool) {
newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema}
err := d.parseSchema(dv, newSchema)
if err != nil {
return errors.New(err.Error())
return err
}
} else {
return errors.New(formatErrorDescription(
@ -214,20 +185,6 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}
// id
if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_STRING,
"given": KEY_ID,
},
))
}
if k, ok := m[KEY_ID].(string); ok {
currentSchema.id = &k
}
// title
if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) {
return errors.New(formatErrorDescription(
@ -256,6 +213,39 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
currentSchema.description = &k
}
// $ref
if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_STRING,
"given": KEY_REF,
},
))
}
if k, ok := m[KEY_REF].(string); ok {
jsonReference, err := gojsonreference.NewJsonReference(k)
if err != nil {
return err
}
currentSchema.ref = &jsonReference
if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok {
currentSchema.refSchema = sch
} else {
err := d.parseReference(documentNode, currentSchema)
if err != nil {
return err
}
return nil
}
}
// type
if existsMapKey(m, KEY_TYPE) {
if isKind(m[KEY_TYPE], reflect.String) {
@ -357,6 +347,26 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}
}
// propertyNames
if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 {
if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) {
newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref}
currentSchema.propertyNames = newSchema
err := d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema)
if err != nil {
return err
}
} else {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": STRING_SCHEMA,
"given": KEY_PATTERN_PROPERTIES,
},
))
}
}
// dependencies
if existsMapKey(m, KEY_DEPENDENCIES) {
err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema)
@ -369,7 +379,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
if existsMapKey(m, KEY_ITEMS) {
if isKind(m[KEY_ITEMS], reflect.Slice) {
for _, itemElement := range m[KEY_ITEMS].([]interface{}) {
if isKind(itemElement, reflect.Map) {
if isKind(itemElement, reflect.Map, reflect.Bool) {
newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
newSchema.ref = currentSchema.ref
currentSchema.AddItemsChild(newSchema)
@ -388,7 +398,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}
currentSchema.itemsChildrenIsSingleSchema = false
}
} else if isKind(m[KEY_ITEMS], reflect.Map) {
} else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) {
newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
newSchema.ref = currentSchema.ref
currentSchema.AddItemsChild(newSchema)
@ -443,7 +453,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
},
))
}
if *multipleOfValue <= 0 {
if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 {
return errors.New(formatErrorDescription(
Locale.GreaterThanZero(),
ErrorDetails{"number": KEY_MULTIPLE_OF},
@ -464,20 +474,62 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}
if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) {
if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
switch *currentSchema.draft {
case Draft4:
if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_BOOLEAN,
"given": KEY_EXCLUSIVE_MINIMUM,
},
))
}
if currentSchema.minimum == nil {
return errors.New(formatErrorDescription(
Locale.CannotBeUsedWithout(),
ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM},
))
}
exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool)
currentSchema.exclusiveMinimum = exclusiveMinimumValue
} else {
return errors.New(formatErrorDescription(
Locale.MustBeOfA(),
ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN},
))
if m[KEY_EXCLUSIVE_MINIMUM].(bool) {
currentSchema.exclusiveMinimum = currentSchema.minimum
currentSchema.minimum = nil
}
case Hybrid:
if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
if currentSchema.minimum == nil {
return errors.New(formatErrorDescription(
Locale.CannotBeUsedWithout(),
ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM},
))
}
if m[KEY_EXCLUSIVE_MINIMUM].(bool) {
currentSchema.exclusiveMinimum = currentSchema.minimum
currentSchema.minimum = nil
}
} else if isJsonNumber(m[KEY_EXCLUSIVE_MINIMUM]) {
currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM])
} else {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER,
"given": KEY_EXCLUSIVE_MINIMUM,
},
))
}
default:
if isJsonNumber(m[KEY_EXCLUSIVE_MINIMUM]) {
currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM])
} else {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_NUMBER,
"given": KEY_EXCLUSIVE_MINIMUM,
},
))
}
}
}
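// For example, draft 4 expects {"minimum": 5, "exclusiveMinimum": true},
// draft 6+ expects the numeric form {"exclusiveMinimum": 5}, and Hybrid
// mode accepts either spelling.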
@ -493,29 +545,62 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}
if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) {
if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
switch *currentSchema.draft {
case Draft4:
if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_BOOLEAN,
"given": KEY_EXCLUSIVE_MAXIMUM,
},
))
}
if currentSchema.maximum == nil {
return errors.New(formatErrorDescription(
Locale.CannotBeUsedWithout(),
ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM},
))
}
exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool)
currentSchema.exclusiveMaximum = exclusiveMaximumValue
} else {
return errors.New(formatErrorDescription(
Locale.MustBeOfA(),
ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": STRING_NUMBER},
))
}
}
if currentSchema.minimum != nil && currentSchema.maximum != nil {
if *currentSchema.minimum > *currentSchema.maximum {
return errors.New(formatErrorDescription(
Locale.CannotBeGT(),
ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM},
))
if m[KEY_EXCLUSIVE_MAXIMUM].(bool) {
currentSchema.exclusiveMaximum = currentSchema.maximum
currentSchema.maximum = nil
}
case Hybrid:
if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
if currentSchema.maximum == nil {
return errors.New(formatErrorDescription(
Locale.CannotBeUsedWithout(),
ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM},
))
}
if m[KEY_EXCLUSIVE_MAXIMUM].(bool) {
currentSchema.exclusiveMaximum = currentSchema.maximum
currentSchema.maximum = nil
}
} else if isJsonNumber(m[KEY_EXCLUSIVE_MAXIMUM]) {
currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM])
} else {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER,
"given": KEY_EXCLUSIVE_MAXIMUM,
},
))
}
default:
if isJsonNumber(m[KEY_EXCLUSIVE_MAXIMUM]) {
currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM])
} else {
return errors.New(formatErrorDescription(
Locale.InvalidType(),
ErrorDetails{
"expected": TYPE_NUMBER,
"given": KEY_EXCLUSIVE_MAXIMUM,
},
))
}
}
}
@ -586,11 +671,6 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
formatString, ok := m[KEY_FORMAT].(string)
if ok && FormatCheckers.Has(formatString) {
currentSchema.format = formatString
} else {
return errors.New(formatErrorDescription(
Locale.MustBeValidFormat(),
ErrorDetails{"key": KEY_FORMAT, "given": m[KEY_FORMAT]},
))
}
}
@ -710,8 +790,24 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}
}
if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 {
newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref}
currentSchema.contains = newSchema
err := d.parseSchema(m[KEY_CONTAINS], newSchema)
if err != nil {
return err
}
}
// validation : all
if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 {
err := currentSchema.AddConst(m[KEY_CONST])
if err != nil {
return err
}
}
if existsMapKey(m, KEY_ENUM) {
if isKind(m[KEY_ENUM], reflect.Slice) {
for _, v := range m[KEY_ENUM].([]interface{}) {
@ -785,7 +881,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}
if existsMapKey(m, KEY_NOT) {
if isKind(m[KEY_NOT], reflect.Map) {
if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) {
newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref}
currentSchema.SetNot(newSchema)
err := d.parseSchema(m[KEY_NOT], newSchema)
@ -800,48 +896,91 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema)
}
}
if *currentSchema.draft >= Draft7 {
if existsMapKey(m, KEY_IF) {
if isKind(m[KEY_IF], reflect.Map, reflect.Bool) {
newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref}
currentSchema.SetIf(newSchema)
err := d.parseSchema(m[KEY_IF], newSchema)
if err != nil {
return err
}
} else {
return errors.New(formatErrorDescription(
Locale.MustBeOfAn(),
ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT},
))
}
}
if existsMapKey(m, KEY_THEN) {
if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) {
newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref}
currentSchema.SetThen(newSchema)
err := d.parseSchema(m[KEY_THEN], newSchema)
if err != nil {
return err
}
} else {
return errors.New(formatErrorDescription(
Locale.MustBeOfAn(),
ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT},
))
}
}
if existsMapKey(m, KEY_ELSE) {
if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) {
newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref}
currentSchema.SetElse(newSchema)
err := d.parseSchema(m[KEY_ELSE], newSchema)
if err != nil {
return err
}
} else {
return errors.New(formatErrorDescription(
Locale.MustBeOfAn(),
ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT},
))
}
}
}
return nil
}
func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) error {
var refdDocumentNode interface{}
jsonPointer := currentSchema.ref.GetPointer()
standaloneDocument := d.pool.GetStandaloneDocument()
func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error {
var (
refdDocumentNode interface{}
dsp *schemaPoolDocument
err error
)
if standaloneDocument != nil {
newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
var err error
refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument)
if err != nil {
return err
}
d.referencePool.Add(currentSchema.ref.String(), newSchema)
} else {
dsp, err := d.pool.GetDocument(*currentSchema.ref)
if err != nil {
return err
}
dsp, err = d.pool.GetDocument(*currentSchema.ref)
if err != nil {
return err
}
newSchema.id = currentSchema.ref
refdDocumentNode, _, err = jsonPointer.Get(dsp.Document)
if err != nil {
return err
}
refdDocumentNode = dsp.Document
newSchema.draft = dsp.Draft
if err != nil {
return err
}
if !isKind(refdDocumentNode, reflect.Map) {
if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) {
return errors.New(formatErrorDescription(
Locale.MustBeOfType(),
ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT},
))
}
// returns the loaded referenced subSchema for the caller to update its current subSchema
newSchemaDocument := refdDocumentNode.(map[string]interface{})
newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
d.referencePool.Add(currentSchema.ref.String()+reference, newSchema)
err := d.parseSchema(newSchemaDocument, newSchema)
err = d.parseSchema(refdDocumentNode, newSchema)
if err != nil {
return err
}
@ -909,7 +1048,7 @@ func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subS
currentSchema.dependencies[k] = valuesToRegister
}
case reflect.Map:
case reflect.Map, reflect.Bool:
depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
err := d.parseSchema(m[k], depSchema)
if err != nil {

203
vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go generated vendored Normal file
View File

@ -0,0 +1,203 @@
// Copyright 2018 johandorland ( https://github.com/johandorland )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gojsonschema
import (
"bytes"
"errors"
"github.com/xeipuuv/gojsonreference"
)
type SchemaLoader struct {
pool *schemaPool
AutoDetect bool
Validate bool
Draft Draft
}
func NewSchemaLoader() *SchemaLoader {
ps := &SchemaLoader{
pool: &schemaPool{
schemaPoolDocuments: make(map[string]*schemaPoolDocument),
},
AutoDetect: true,
Validate: false,
Draft: Hybrid,
}
ps.pool.autoDetect = &ps.AutoDetect
return ps
}
func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error {
var (
schema string
err error
)
if sl.AutoDetect {
schema, _, err = parseSchemaURL(documentNode)
if err != nil {
return err
}
}
// If no explicit "$schema" is given, use the default metaschema associated with the configured draft
if schema == "" {
if sl.Draft == Hybrid {
return nil
}
schema = drafts.GetSchemaURL(sl.Draft)
}
//Disable validation when loading the metaschema to prevent an infinite recursive loop
sl.Validate = false
metaSchema, err := sl.Compile(NewReferenceLoader(schema))
if err != nil {
return err
}
sl.Validate = true
result := metaSchema.validateDocument(documentNode)
if !result.Valid() {
var res bytes.Buffer
for _, err := range result.Errors() {
res.WriteString(err.String())
res.WriteString("\n")
}
return errors.New(res.String())
}
return nil
}
// AddSchemas adds an arbitrary number of schemas to the schema cache. As this function does not require
// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema
func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error {
emptyRef, _ := gojsonreference.NewJsonReference("")
for _, loader := range loaders {
doc, err := loader.LoadJSON()
if err != nil {
return err
}
if sl.Validate {
if err := sl.validateMetaschema(doc); err != nil {
return err
}
}
// Directly use the Recursive function, so that it only gets added to the schema pool by $id
// and not by the ref of the document, as that is empty
if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil {
return err
}
}
return nil
}
//AddSchema adds a schema under the provided URL to the schema cache
func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error {
ref, err := gojsonreference.NewJsonReference(url)
if err != nil {
return err
}
doc, err := loader.LoadJSON()
if err != nil {
return err
}
if sl.Validate {
if err := sl.validateMetaschema(doc); err != nil {
return err
}
}
return sl.pool.parseReferences(doc, ref, true)
}
func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) {
ref, err := rootSchema.JsonReference()
if err != nil {
return nil, err
}
d := Schema{}
d.pool = sl.pool
d.pool.jsonLoaderFactory = rootSchema.LoaderFactory()
d.documentReference = ref
d.referencePool = newSchemaReferencePool()
var doc interface{}
if ref.String() != "" {
// Get document from schema pool
spd, err := d.pool.GetDocument(d.documentReference)
if err != nil {
return nil, err
}
doc = spd.Document
} else {
// Load JSON directly
doc, err = rootSchema.LoadJSON()
if err != nil {
return nil, err
}
// References need only be parsed if loading JSON directly
// as pool.GetDocument already does this for us if loading by reference
err = sl.pool.parseReferences(doc, ref, true)
if err != nil {
return nil, err
}
}
if sl.Validate {
if err := sl.validateMetaschema(doc); err != nil {
return nil, err
}
}
draft := sl.Draft
if sl.AutoDetect {
_, detectedDraft, err := parseSchemaURL(doc)
if err != nil {
return nil, err
}
if detectedDraft != nil {
draft = *detectedDraft
}
}
err = d.parse(doc, draft)
if err != nil {
return nil, err
}
return &d, nil
}
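// A minimal sketch of the loader in use (URLs and schema bodies are
// illustrative):
//
//	sl := NewSchemaLoader()
//	sl.Draft = Draft7
//	sl.Validate = true
//	err := sl.AddSchemas(NewStringLoader(`{"$id": "http://example.com/defs.json"}`))
//	if err != nil { /* handle */ }
//	schema, err := sl.Compile(NewReferenceLoader("http://example.com/root.json"))
//	// AddSchemas pre-populates the pool by $id; Compile resolves $refs
//	// against it and, with Validate set, checks each document against the
//	// draft metaschema first.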

View File

@ -28,82 +28,188 @@ package gojsonschema
import (
"errors"
"fmt"
"reflect"
"github.com/xeipuuv/gojsonreference"
)
type schemaPoolDocument struct {
Document interface{}
Draft *Draft
}
type schemaPool struct {
schemaPoolDocuments map[string]*schemaPoolDocument
standaloneDocument interface{}
jsonLoaderFactory JSONLoaderFactory
autoDetect *bool
}
func newSchemaPool(f JSONLoaderFactory) *schemaPool {
func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error {
p := &schemaPool{}
p.schemaPoolDocuments = make(map[string]*schemaPoolDocument)
p.standaloneDocument = nil
p.jsonLoaderFactory = f
var (
draft *Draft
err error
reference = ref.String()
)
// Only the root document should be added to the schema pool if pooled is true
if _, ok := p.schemaPoolDocuments[reference]; pooled && ok {
return fmt.Errorf("Reference already exists: \"%s\"", reference)
}
return p
if *p.autoDetect {
_, draft, err = parseSchemaURL(document)
if err != nil {
return err
}
}
err = p.parseReferencesRecursive(document, ref, draft)
if pooled {
p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft}
}
return err
}
func (p *schemaPool) SetStandaloneDocument(document interface{}) {
p.standaloneDocument = document
}
func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error {
// parseReferencesRecursive parses a JSON document and resolves all $id and $ref references.
// For $ref references it takes into account the $id scope it is in and replaces
// the reference by the absolute resolved reference
func (p *schemaPool) GetStandaloneDocument() (document interface{}) {
return p.standaloneDocument
// When it encounters errors it fails silently; error handling is done when the schema
// is syntactically parsed, and any error encountered here will also surface there.
switch m := document.(type) {
case []interface{}:
for _, v := range m {
p.parseReferencesRecursive(v, ref, draft)
}
case map[string]interface{}:
localRef := &ref
keyID := KEY_ID_NEW
if existsMapKey(m, KEY_ID) {
keyID = KEY_ID
}
if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) {
jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string))
if err == nil {
localRef, err = ref.Inherits(jsonReference)
if err == nil {
if _, ok := p.schemaPoolDocuments[localRef.String()]; ok {
return fmt.Errorf("Reference already exists: \"%s\"", localRef.String())
}
p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft}
}
}
}
if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) {
jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string))
if err == nil {
absoluteRef, err := localRef.Inherits(jsonReference)
if err == nil {
m[KEY_REF] = absoluteRef.String()
}
}
}
for k, v := range m {
// const and enums should be interpreted literally, so ignore them
if k == KEY_CONST || k == KEY_ENUM {
continue
}
// Something like a properties map or a dependencies map is not itself a schema: it may
// contain keys named "$ref", "$id", "const", etc. Therefore don't treat it as a schema.
if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES {
if child, ok := v.(map[string]interface{}); ok {
for _, v := range child {
p.parseReferencesRecursive(v, *localRef, draft)
}
}
} else {
p.parseReferencesRecursive(v, *localRef, draft)
}
}
}
return nil
}
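// For example, inside a document whose $id is "http://example.com/root.json",
// a sibling {"$ref": "other.json#/definitions/a"} is rewritten in place to
// {"$ref": "http://example.com/other.json#/definitions/a"}, so later lookups
// can go straight to the pool by absolute reference.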
func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) {
var (
spd *schemaPoolDocument
draft *Draft
ok bool
err error
)
if internalLogEnabled {
internalLog("Get Document ( %s )", reference.String())
}
var err error
// Create a deep copy, so we can remove the fragment part later on without altering the original
refToUrl, _ := gojsonreference.NewJsonReference(reference.String())
// It is not possible to load anything that is not canonical...
if !reference.IsCanonical() {
return nil, errors.New(formatErrorDescription(
Locale.ReferenceMustBeCanonical(),
ErrorDetails{"reference": reference},
))
}
// First check if the given fragment is a location independent identifier
// http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3
refToUrl := reference
refToUrl.GetUrl().Fragment = ""
var spd *schemaPoolDocument
// Try to find the requested document in the pool
for k := range p.schemaPoolDocuments {
if k == refToUrl.String() {
spd = p.schemaPoolDocuments[k]
}
}
if spd != nil {
if spd, ok = p.schemaPoolDocuments[refToUrl.String()]; ok {
if internalLogEnabled {
internalLog(" From pool")
}
return spd, nil
}
// If the given reference is not a location independent identifier,
// strip the fragment and look for a document with its base URI
refToUrl.GetUrl().Fragment = ""
if cachedSpd, ok := p.schemaPoolDocuments[refToUrl.String()]; ok {
document, _, err := reference.GetPointer().Get(cachedSpd.Document)
if err != nil {
return nil, err
}
if internalLogEnabled {
internalLog(" From pool")
}
spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft}
p.schemaPoolDocuments[reference.String()] = spd
return spd, nil
}
// It is not possible to load anything remotely that is not canonical...
if !reference.IsCanonical() {
return nil, errors.New(formatErrorDescription(
Locale.ReferenceMustBeCanonical(),
ErrorDetails{"reference": reference.String()},
))
}
jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String())
document, err := jsonReferenceLoader.LoadJSON()
if err != nil {
return nil, err
}
spd = &schemaPoolDocument{Document: document}
// add the document to the pool for potential later use
p.schemaPoolDocuments[refToUrl.String()] = spd
// add the whole document to the pool for potential re-use
p.parseReferences(document, refToUrl, true)
return spd, nil
_, draft, _ = parseSchemaURL(document)
// resolve the potential fragment and also cache it
document, _, err = reference.GetPointer().Get(document)
if err != nil {
return nil, err
}
return &schemaPoolDocument{Document: document, Draft: draft}, nil
}

View File

@ -62,6 +62,7 @@ func (p *schemaReferencePool) Add(ref string, sch *subSchema) {
if internalLogEnabled {
internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref))
}
p.documents[ref] = sch
if _, ok := p.documents[ref]; !ok {
p.documents[ref] = sch
}
}

View File

@ -28,6 +28,7 @@ package gojsonschema
import (
"errors"
"math/big"
"regexp"
"strings"
@ -35,8 +36,9 @@ import (
)
const (
KEY_SCHEMA = "$subSchema"
KEY_ID = "$id"
KEY_SCHEMA = "$schema"
KEY_ID = "id"
KEY_ID_NEW = "$id"
KEY_REF = "$ref"
KEY_TITLE = "title"
KEY_DESCRIPTION = "description"
@ -46,6 +48,7 @@ const (
KEY_PROPERTIES = "properties"
KEY_PATTERN_PROPERTIES = "patternProperties"
KEY_ADDITIONAL_PROPERTIES = "additionalProperties"
KEY_PROPERTY_NAMES = "propertyNames"
KEY_DEFINITIONS = "definitions"
KEY_MULTIPLE_OF = "multipleOf"
KEY_MINIMUM = "minimum"
@ -63,17 +66,23 @@ const (
KEY_MIN_ITEMS = "minItems"
KEY_MAX_ITEMS = "maxItems"
KEY_UNIQUE_ITEMS = "uniqueItems"
KEY_CONTAINS = "contains"
KEY_CONST = "const"
KEY_ENUM = "enum"
KEY_ONE_OF = "oneOf"
KEY_ANY_OF = "anyOf"
KEY_ALL_OF = "allOf"
KEY_NOT = "not"
KEY_IF = "if"
KEY_THEN = "then"
KEY_ELSE = "else"
)
type subSchema struct {
draft *Draft
// basic subSchema meta properties
id *string
id *gojsonreference.JsonReference
title *string
description *string
@ -86,23 +95,19 @@ type subSchema struct {
ref *gojsonreference.JsonReference
// Schema referenced
refSchema *subSchema
// Json reference
subSchema *gojsonreference.JsonReference
// hierarchy
parent *subSchema
definitions map[string]*subSchema
definitionsChildren []*subSchema
itemsChildren []*subSchema
itemsChildrenIsSingleSchema bool
propertiesChildren []*subSchema
// validation : number / integer
multipleOf *float64
maximum *float64
exclusiveMaximum bool
minimum *float64
exclusiveMinimum bool
multipleOf *big.Rat
maximum *big.Rat
exclusiveMaximum *big.Rat
minimum *big.Rat
exclusiveMinimum *big.Rat
// validation : string
minLength *int
@ -118,27 +123,43 @@ type subSchema struct {
dependencies map[string]interface{}
additionalProperties interface{}
patternProperties map[string]*subSchema
propertyNames *subSchema
// validation : array
minItems *int
maxItems *int
uniqueItems bool
contains *subSchema
additionalItems interface{}
// validation : all
enum []string
_const *string //const is a golang keyword
enum []string
// validation : subSchema
oneOf []*subSchema
anyOf []*subSchema
allOf []*subSchema
not *subSchema
_if *subSchema // if/else are golang keywords
_then *subSchema
_else *subSchema
}
func (s *subSchema) AddConst(i interface{}) error {
is, err := marshalWithoutNumber(i)
if err != nil {
return err
}
s._const = is
return nil
}
func (s *subSchema) AddEnum(i interface{}) error {
is, err := marshalToJsonString(i)
is, err := marshalWithoutNumber(i)
if err != nil {
return err
}
@ -157,7 +178,7 @@ func (s *subSchema) AddEnum(i interface{}) error {
func (s *subSchema) ContainsEnum(i interface{}) (bool, error) {
is, err := marshalToJsonString(i)
is, err := marshalWithoutNumber(i)
if err != nil {
return false, err
}
@ -181,6 +202,18 @@ func (s *subSchema) SetNot(subSchema *subSchema) {
s.not = subSchema
}
func (s *subSchema) SetIf(subSchema *subSchema) {
s._if = subSchema
}
func (s *subSchema) SetThen(subSchema *subSchema) {
s._then = subSchema
}
func (s *subSchema) SetElse(subSchema *subSchema) {
s._else = subSchema
}
func (s *subSchema) AddRequired(value string) error {
if isStringInSlice(s.required, value) {
@ -195,10 +228,6 @@ func (s *subSchema) AddRequired(value string) error {
return nil
}
func (s *subSchema) AddDefinitionChild(child *subSchema) {
s.definitionsChildren = append(s.definitionsChildren, child)
}
func (s *subSchema) AddItemsChild(child *subSchema) {
s.itemsChildren = append(s.itemsChildren, child)
}

View File

@ -29,17 +29,23 @@ import (
"encoding/json"
"fmt"
"math"
"math/big"
"reflect"
"strconv"
)
func isKind(what interface{}, kind reflect.Kind) bool {
func isKind(what interface{}, kinds ...reflect.Kind) bool {
target := what
if isJsonNumber(what) {
// JSON Numbers are strings!
target = *mustBeNumber(what)
}
return reflect.ValueOf(target).Kind() == kind
targetKind := reflect.ValueOf(target).Kind()
for _, kind := range kinds {
if targetKind == kind {
return true
}
}
return false
}
func existsMapKey(m map[string]interface{}, k string) bool {
@ -56,6 +62,16 @@ func isStringInSlice(s []string, what string) bool {
return false
}
// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s.
func indexStringInSlice(s []string, what string) int {
for i := range s {
if s[i] == what {
return i
}
}
return -1
}
func marshalToJsonString(value interface{}) (*string, error) {
mBytes, err := json.Marshal(value)
@ -67,6 +83,28 @@ func marshalToJsonString(value interface{}) (*string, error) {
return &sBytes, nil
}
func marshalWithoutNumber(value interface{}) (*string, error) {
// The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
// This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1
// One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber
// so that these differences in representation are removed
jsonString, err := marshalToJsonString(value)
if err != nil {
return nil, err
}
var document interface{}
err = json.Unmarshal([]byte(*jsonString), &document)
if err != nil {
return nil, err
}
return marshalToJsonString(document)
}
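// A minimal sketch of the difference, assuming a document decoded with
// UseNumber (values below are illustrative):
//
//	one, _ := marshalToJsonString(json.Number("1.00"))   // "1.00"
//	norm, _ := marshalWithoutNumber(json.Number("1.00")) // "1"
//	// Without the round-trip, const/enum/uniqueItems comparisons would
//	// treat 1.00 and 1 as different values.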
func isJsonNumber(what interface{}) bool {
switch what.(type) {
@ -78,21 +116,13 @@ func isJsonNumber(what interface{}) bool {
return false
}
func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, isValidInt32 bool) {
func checkJsonInteger(what interface{}) (isInt bool) {
jsonNumber := what.(json.Number)
f64, errFloat64 := jsonNumber.Float64()
s64 := strconv.FormatFloat(f64, 'f', -1, 64)
_, errInt64 := strconv.ParseInt(s64, 10, 64)
bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber))
isValidFloat64 = errFloat64 == nil
isValidInt64 = errInt64 == nil
_, errInt32 := strconv.ParseInt(s64, 10, 32)
isValidInt32 = isValidInt64 && errInt32 == nil
return
return isValidNumber && bigFloat.IsInt()
}
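// For example, with big.Rat both "2" and "2.0" report IsInt() == true while
// "2.5" does not, so JSON integers are recognised however they were written
// and however large they are:
//
//	r, _ := new(big.Rat).SetString("2.0")
//	r.IsInt() // true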
@ -117,9 +147,9 @@ func mustBeInteger(what interface{}) *int {
number := what.(json.Number)
_, _, isValidInt32 := checkJsonNumber(number)
isInt := checkJsonInteger(number)
if isValidInt32 {
if isInt {
int64Value, err := number.Int64()
if err != nil {
@ -138,15 +168,13 @@ func mustBeInteger(what interface{}) *int {
return nil
}
func mustBeNumber(what interface{}) *float64 {
func mustBeNumber(what interface{}) *big.Rat {
if isJsonNumber(what) {
number := what.(json.Number)
float64Value, err := number.Float64()
if err == nil {
return &float64Value
float64Value, success := new(big.Rat).SetString(string(number))
if success {
return float64Value
} else {
return nil
}

View File

@ -27,6 +27,7 @@ package gojsonschema
import (
"encoding/json"
"math/big"
"reflect"
"regexp"
"strconv"
@ -60,24 +61,27 @@ func (v *Schema) Validate(l JSONLoader) (*Result, error) {
return nil, err
}
return v.validateDocument(root), nil
}
func (v *Schema) validateDocument(root interface{}) *Result {
// begin validation
result := &Result{}
context := newJsonContext(STRING_CONTEXT_ROOT, nil)
context := NewJsonContext(STRING_CONTEXT_ROOT, nil)
v.rootSchema.validateRecursive(v.rootSchema, root, result, context)
return result, nil
return result
}
func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result {
func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result {
result := &Result{}
v.validateRecursive(v, document, result, context)
return result
}
// Walker function to validate the json recursively against the subSchema
func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateRecursive %s", context.String())
@ -93,7 +97,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
// Check for null value
if currentNode == nil {
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
result.addError(
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
@ -114,18 +118,18 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
value := currentNode.(json.Number)
_, isValidInt64, _ := checkJsonNumber(value)
isInt := checkJsonInteger(value)
validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isValidInt64 && currentSubSchema.types.Contains(TYPE_INTEGER))
validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER))
if currentSubSchema.types.IsTyped() && !validType {
givenType := TYPE_INTEGER
if !isValidInt64 {
if !isInt {
givenType = TYPE_NUMBER
}
result.addError(
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
@ -154,7 +158,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
case reflect.Slice:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) {
result.addError(
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
@ -177,7 +181,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
case reflect.Map:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) {
result.addError(
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
@ -202,7 +206,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
for _, pSchema := range currentSubSchema.propertiesChildren {
nextNode, ok := castCurrentNode[pSchema.property]
if ok {
subContext := newJsonContext(pSchema.property, context)
subContext := NewJsonContext(pSchema.property, context)
v.validateRecursive(pSchema, nextNode, result, subContext)
}
}
@ -212,7 +216,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
case reflect.Bool:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) {
result.addError(
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
@ -234,7 +238,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
case reflect.String:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) {
result.addError(
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
@ -263,7 +267,7 @@ func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode i
}
// Different kinds of validation there, subSchema / common / array / object / string...
func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateSchema %s", context.String())
@ -287,7 +291,7 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
}
if !validatedAnyOf {
result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{})
result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{})
if bestValidationResult != nil {
// add error messages of closest matching subSchema as
@ -313,7 +317,7 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
if nbValidated != 1 {
result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{})
result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{})
if nbValidated == 0 {
// add error messages of closest matching subSchema as
@ -336,14 +340,14 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
}
if nbValidated != len(currentSubSchema.allOf) {
result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{})
result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{})
}
}
if currentSubSchema.not != nil {
validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context)
if validationResult.Valid() {
result.addError(new(NumberNotError), context, currentNode, ErrorDetails{})
result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{})
}
}
@ -356,7 +360,7 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
case []string:
for _, dependOnKey := range dependency {
if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
result.addError(
result.addInternalError(
new(MissingDependencyError),
context,
currentNode,
@ -367,31 +371,65 @@ func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode inte
case *subSchema:
dependency.validateRecursive(dependency, currentNode, result, context)
}
}
}
}
}
if currentSubSchema._if != nil {
validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context)
if currentSubSchema._then != nil && validationResultIf.Valid() {
validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context)
if !validationResultThen.Valid() {
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{})
result.mergeErrors(validationResultThen)
}
}
if currentSubSchema._else != nil && !validationResultIf.Valid() {
validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context)
if !validationResultElse.Valid() {
result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{})
result.mergeErrors(validationResultElse)
}
}
}
result.incrementScore()
}
func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateCommon %s", context.String())
internalLog(" %v", value)
}
// const:
if currentSubSchema._const != nil {
vString, err := marshalWithoutNumber(value)
if err != nil {
result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err})
} else if *vString != *currentSubSchema._const {
result.addInternalError(new(ConstError),
context,
value,
ErrorDetails{
"allowed": *currentSubSchema._const,
},
)
}
}
// enum:
if len(currentSubSchema.enum) > 0 {
has, err := currentSubSchema.ContainsEnum(value)
if err != nil {
result.addError(new(InternalError), context, value, ErrorDetails{"error": err})
result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err})
}
if !has {
result.addError(
result.addInternalError(
new(EnumError),
context,
value,
@ -405,7 +443,7 @@ func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{
result.incrementScore()
}
func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) {
func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateArray %s", context.String())
@ -417,7 +455,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
// TODO explain
if currentSubSchema.itemsChildrenIsSingleSchema {
for i := range value {
subContext := newJsonContext(strconv.Itoa(i), context)
subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext)
result.mergeErrors(validationResult)
}
@ -428,7 +466,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
// while we have both schemas and values, check them against each other
for i := 0; i != nbItems && i != nbValues; i++ {
subContext := newJsonContext(strconv.Itoa(i), context)
subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext)
result.mergeErrors(validationResult)
}
@ -440,12 +478,12 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
switch currentSubSchema.additionalItems.(type) {
case bool:
if !currentSubSchema.additionalItems.(bool) {
result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{})
result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{})
}
case *subSchema:
additionalItemSchema := currentSubSchema.additionalItems.(*subSchema)
for i := nbItems; i != nbValues; i++ {
subContext := newJsonContext(strconv.Itoa(i), context)
subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext)
result.mergeErrors(validationResult)
}
@ -457,7 +495,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
// minItems & maxItems
if currentSubSchema.minItems != nil {
if nbValues < int(*currentSubSchema.minItems) {
result.addError(
result.addInternalError(
new(ArrayMinItemsError),
context,
value,
@ -467,7 +505,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
}
if currentSubSchema.maxItems != nil {
if nbValues > int(*currentSubSchema.maxItems) {
result.addError(
result.addInternalError(
new(ArrayMaxItemsError),
context,
value,
@ -479,27 +517,59 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface
// uniqueItems:
if currentSubSchema.uniqueItems {
var stringifiedItems []string
for _, v := range value {
vString, err := marshalToJsonString(v)
for j, v := range value {
vString, err := marshalWithoutNumber(v)
if err != nil {
result.addError(new(InternalError), context, value, ErrorDetails{"err": err})
result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err})
}
if isStringInSlice(stringifiedItems, *vString) {
result.addError(
if i := indexStringInSlice(stringifiedItems, *vString); i > -1 {
result.addInternalError(
new(ItemsMustBeUniqueError),
context,
value,
ErrorDetails{"type": TYPE_ARRAY},
ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j},
)
}
stringifiedItems = append(stringifiedItems, *vString)
}
}
// contains:
if currentSubSchema.contains != nil {
validatedOne := false
var bestValidationResult *Result
for i, v := range value {
subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext)
if validationResult.Valid() {
validatedOne = true
break
} else {
if bestValidationResult == nil || validationResult.score > bestValidationResult.score {
bestValidationResult = validationResult
}
}
}
if !validatedOne {
result.addInternalError(
new(ArrayContainsError),
context,
value,
ErrorDetails{},
)
if bestValidationResult != nil {
result.mergeErrors(bestValidationResult)
}
}
}
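// For example, against {"contains": {"type": "integer"}} the instance
// ["a", 1] validates (one element matches) while ["a", "b"] produces an
// ArrayContainsError merged with the closest-scoring element result.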
result.incrementScore()
}
func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) {
func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateObject %s", context.String())
@ -509,7 +579,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
// minProperties & maxProperties:
if currentSubSchema.minProperties != nil {
if len(value) < int(*currentSubSchema.minProperties) {
result.addError(
result.addInternalError(
new(ArrayMinPropertiesError),
context,
value,
@ -519,7 +589,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
}
if currentSubSchema.maxProperties != nil {
if len(value) > int(*currentSubSchema.maxProperties) {
result.addError(
result.addInternalError(
new(ArrayMaxPropertiesError),
context,
value,
@ -534,7 +604,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
if ok {
result.incrementScore()
} else {
result.addError(
result.addInternalError(
new(RequiredError),
context,
value,
@ -565,7 +635,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
if found {
if pp_has && !pp_match {
result.addError(
result.addInternalError(
new(AdditionalPropertyNotAllowedError),
context,
value[pk],
@ -576,7 +646,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
} else {
if !pp_has || !pp_match {
result.addError(
result.addInternalError(
new(AdditionalPropertyNotAllowedError),
context,
value[pk],
@ -628,7 +698,7 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
if pp_has && !pp_match {
result.addError(
result.addInternalError(
new(InvalidPropertyPatternError),
context,
value[pk],
@ -642,10 +712,25 @@ func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string
}
}
// propertyNames:
if currentSubSchema.propertyNames != nil {
for pk := range value {
validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context)
if !validationResult.Valid() {
result.addInternalError(new(InvalidPropertyNameError),
context,
value, ErrorDetails{
"property": pk,
})
result.mergeErrors(validationResult)
}
}
}
result.incrementScore()
}
func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) {
func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) (has bool, matched bool) {
if internalLogEnabled {
internalLog("validatePatternProperty %s", context.String())
@ -659,12 +744,10 @@ func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key str
for pk, pv := range currentSubSchema.patternProperties {
if matches, _ := regexp.MatchString(pk, key); matches {
has = true
subContext := newJsonContext(key, context)
subContext := NewJsonContext(key, context)
validationResult := pv.subValidateWithContext(value, subContext)
result.mergeErrors(validationResult)
if validationResult.Valid() {
validatedkey = true
}
validatedkey = true
}
}
@ -677,7 +760,7 @@ func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key str
return has, true
}
func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
// Ignore JSON numbers
if isJsonNumber(value) {
@ -699,7 +782,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
// minLength & maxLength:
if currentSubSchema.minLength != nil {
if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) {
result.addError(
result.addInternalError(
new(StringLengthGTEError),
context,
value,
@ -709,7 +792,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
}
if currentSubSchema.maxLength != nil {
if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) {
result.addError(
result.addInternalError(
new(StringLengthLTEError),
context,
value,
@ -721,7 +804,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
// pattern:
if currentSubSchema.pattern != nil {
if !currentSubSchema.pattern.MatchString(stringValue) {
result.addError(
result.addInternalError(
new(DoesNotMatchPatternError),
context,
value,
@ -734,7 +817,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
// format
if currentSubSchema.format != "" {
if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) {
result.addError(
result.addInternalError(
new(DoesNotMatchFormatError),
context,
value,
@ -746,7 +829,7 @@ func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{
result.incrementScore()
}
func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
// Ignore non numbers
if !isJsonNumber(value) {
@ -759,79 +842,77 @@ func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{
}
number := value.(json.Number)
float64Value, _ := number.Float64()
float64Value, _ := new(big.Rat).SetString(string(number))
// multipleOf:
if currentSubSchema.multipleOf != nil {
if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) {
result.addError(
if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() {
result.addInternalError(
new(MultipleOfError),
context,
resultErrorFormatJsonNumber(number),
ErrorDetails{"multiple": *currentSubSchema.multipleOf},
ErrorDetails{"multiple": new(big.Float).SetRat(currentSubSchema.multipleOf)},
)
}
}
//maximum & exclusiveMaximum:
if currentSubSchema.maximum != nil {
if currentSubSchema.exclusiveMaximum {
if float64Value >= *currentSubSchema.maximum {
result.addError(
new(NumberLTError),
context,
resultErrorFormatJsonNumber(number),
ErrorDetails{
"max": resultErrorFormatNumber(*currentSubSchema.maximum),
},
)
}
} else {
if float64Value > *currentSubSchema.maximum {
result.addError(
new(NumberLTEError),
context,
resultErrorFormatJsonNumber(number),
ErrorDetails{
"max": resultErrorFormatNumber(*currentSubSchema.maximum),
},
)
}
if float64Value.Cmp(currentSubSchema.maximum) == 1 {
result.addInternalError(
new(NumberLTEError),
context,
resultErrorFormatJsonNumber(number),
ErrorDetails{
"max": currentSubSchema.maximum,
},
)
}
}
if currentSubSchema.exclusiveMaximum != nil {
if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 {
result.addInternalError(
new(NumberLTError),
context,
resultErrorFormatJsonNumber(number),
ErrorDetails{
"max": currentSubSchema.exclusiveMaximum,
},
)
}
}
//minimum & exclusiveMinimum:
if currentSubSchema.minimum != nil {
if currentSubSchema.exclusiveMinimum {
if float64Value <= *currentSubSchema.minimum {
result.addError(
new(NumberGTError),
context,
resultErrorFormatJsonNumber(number),
ErrorDetails{
"min": resultErrorFormatNumber(*currentSubSchema.minimum),
},
)
}
} else {
if float64Value < *currentSubSchema.minimum {
result.addError(
new(NumberGTEError),
context,
resultErrorFormatJsonNumber(number),
ErrorDetails{
"min": resultErrorFormatNumber(*currentSubSchema.minimum),
},
)
}
if float64Value.Cmp(currentSubSchema.minimum) == -1 {
result.addInternalError(
new(NumberGTEError),
context,
resultErrorFormatJsonNumber(number),
ErrorDetails{
"min": currentSubSchema.minimum,
},
)
}
}
if currentSubSchema.exclusiveMinimum != nil {
if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 {
result.addInternalError(
new(NumberGTError),
context,
resultErrorFormatJsonNumber(number),
ErrorDetails{
"min": currentSubSchema.exclusiveMinimum,
},
)
}
}
// format
if currentSubSchema.format != "" {
if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) {
result.addError(
result.addInternalError(
new(DoesNotMatchFormatError),
context,
value,

View File

@ -325,16 +325,14 @@ func ReadEntity(packets *packet.Reader) (*Entity, error) {
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
packets.Unread(p)
return nil, errors.StructuralError("first packet was not a public/private key")
} else {
e.PrimaryKey = &e.PrivateKey.PublicKey
}
e.PrimaryKey = &e.PrivateKey.PublicKey
}
if !e.PrimaryKey.PubKeyAlgo.CanSign() {
return nil, errors.StructuralError("primary key cannot be used for signatures")
}
var current *Identity
var revocations []*packet.Signature
EachPacket:
for {
@ -347,32 +345,8 @@ EachPacket:
switch pkt := p.(type) {
case *packet.UserId:
current = new(Identity)
current.Name = pkt.Id
current.UserId = pkt
e.Identities[pkt.Id] = current
for {
p, err = packets.Next()
if err == io.EOF {
return nil, io.ErrUnexpectedEOF
} else if err != nil {
return nil, err
}
sig, ok := p.(*packet.Signature)
if !ok {
return nil, errors.StructuralError("user ID packet not followed by self-signature")
}
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
}
current.SelfSignature = sig
break
}
current.Signatures = append(current.Signatures, sig)
if err := addUserID(e, packets, pkt); err != nil {
return nil, err
}
case *packet.Signature:
if pkt.SigType == packet.SigTypeKeyRevocation {
@ -381,11 +355,9 @@ EachPacket:
// TODO: RFC4880 5.2.1 permits signatures
// directly on keys (eg. to bind additional
// revocation keys).
}
// Else, ignoring the signature as it does not follow anything
// we would know to attach it to.
case *packet.PrivateKey:
if pkt.IsSubkey == false {
packets.Unread(p)
@ -426,33 +398,105 @@ EachPacket:
return e, nil
}
func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
// Make a new Identity object, that we might wind up throwing away.
// We'll only add it if we get a valid self-signature over this
// userID.
identity := new(Identity)
identity.Name = pkt.Id
identity.UserId = pkt
for {
p, err := packets.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
sig, ok := p.(*packet.Signature)
if !ok {
packets.Unread(p)
break
}
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
return errors.StructuralError("user ID self-signature invalid: " + err.Error())
}
identity.SelfSignature = sig
e.Identities[pkt.Id] = identity
} else {
identity.Signatures = append(identity.Signatures, sig)
}
}
return nil
}
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
var subKey Subkey
subKey.PublicKey = pub
subKey.PrivateKey = priv
for {
p, err := packets.Next()
if err == io.EOF {
break
} else if err != nil {
return errors.StructuralError("subkey signature invalid: " + err.Error())
}
sig, ok := p.(*packet.Signature)
if !ok {
packets.Unread(p)
break
}
if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
return errors.StructuralError("subkey signature with wrong type")
}
if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
return errors.StructuralError("subkey signature invalid: " + err.Error())
}
switch sig.SigType {
case packet.SigTypeSubkeyRevocation:
subKey.Sig = sig
case packet.SigTypeSubkeyBinding:
if shouldReplaceSubkeySig(subKey.Sig, sig) {
subKey.Sig = sig
}
}
}
if subKey.Sig == nil {
return errors.StructuralError("subkey packet not followed by signature")
}
e.Subkeys = append(e.Subkeys, subKey)
return nil
}
func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool {
if potentialNewSig == nil {
return false
}
if existingSig == nil {
return true
}
if existingSig.SigType == packet.SigTypeSubkeyRevocation {
return false // never override a revocation signature
}
return potentialNewSig.CreationTime.After(existingSig.CreationTime)
}
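shouldReplaceSubkeySig encodes a small policy: a revocation, once recorded, is final; otherwise the newest binding signature wins. A sketch of the same decision outside the package (pick is a hypothetical helper restating the rule; the packet types are real):

package main

import (
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

// pick restates the shouldReplaceSubkeySig policy for illustration.
func pick(existing, candidate *packet.Signature) *packet.Signature {
	if candidate == nil {
		return existing
	}
	if existing == nil {
		return candidate
	}
	if existing.SigType == packet.SigTypeSubkeyRevocation {
		return existing // revocations are never overridden
	}
	if candidate.CreationTime.After(existing.CreationTime) {
		return candidate
	}
	return existing
}

func main() {
	older := &packet.Signature{SigType: packet.SigTypeSubkeyBinding, CreationTime: time.Unix(1000, 0)}
	newer := &packet.Signature{SigType: packet.SigTypeSubkeyBinding, CreationTime: time.Unix(2000, 0)}
	fmt.Println(pick(older, newer) == newer) // true: the newer binding wins
}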
const defaultRSAKeyBits = 2048
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
@ -487,7 +531,7 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
}
isPrimaryId := true
e.Identities[uid.Id] = &Identity{
Name: uid.Id,
UserId: uid,
SelfSignature: &packet.Signature{
CreationTime: currentTime,
@ -501,6 +545,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config)
if err != nil {
return nil, err
}
// If the user passes in a DefaultHash via packet.Config,
// set the PreferredHash for the SelfSignature.
@ -508,6 +556,11 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
}
// Likewise for DefaultCipher.
if config != nil && config.DefaultCipher != 0 {
e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
}
e.Subkeys = make([]Subkey, 1)
e.Subkeys[0] = Subkey{
PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
@ -525,13 +578,16 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
}
e.Subkeys[0].PublicKey.IsSubkey = true
e.Subkeys[0].PrivateKey.IsSubkey = true
err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config)
if err != nil {
return nil, err
}
return e, nil
}
// SerializePrivate serializes an Entity, including private key material, but
// excluding signatures from other entities, to the given Writer.
// Identities and subkeys are re-signed in case they changed since NewEntry.
// If config is nil, sensible defaults will be used.
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
err = e.PrivateKey.Serialize(w)
@ -569,8 +625,8 @@ func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error
return nil
}
// Serialize writes the public part of the given Entity to w, including
// signatures from other entities. No private key material will be output.
func (e *Entity) Serialize(w io.Writer) error {
err := e.PrimaryKey.Serialize(w)
if err != nil {


@ -42,12 +42,18 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) {
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
if err != nil {
return
}
case PubKeyAlgoElGamal:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
if err != nil {
return
}
e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
if err != nil {
return
}
}
_, err = consumeAll(r)
return
@ -72,7 +78,8 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
// padding oracle attacks.
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
k := priv.PrivateKey.(*rsa.PrivateKey)
b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
case PubKeyAlgoElGamal:
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)


@ -11,10 +11,12 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/openpgp/errors"
"crypto/rsa"
"io"
"math/big"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/openpgp/errors"
)
// readFull is the same as io.ReadFull except that reading zero bytes returns
@ -402,14 +404,16 @@ const (
type PublicKeyAlgorithm uint8
const (
PubKeyAlgoRSA PublicKeyAlgorithm = 1
PubKeyAlgoElGamal PublicKeyAlgorithm = 16
PubKeyAlgoDSA PublicKeyAlgorithm = 17
// RFC 6637, Section 5.
PubKeyAlgoECDH PublicKeyAlgorithm = 18
PubKeyAlgoECDSA PublicKeyAlgorithm = 19
// Deprecated in RFC 4880, Section 13.5. Use key flags instead.
PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
)
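The constant reshuffle above is editorial; the wire values are unchanged, and the relocated deprecation note is what matters to callers. A quick check, assuming the vendored package path:

package main

import (
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	fmt.Println(packet.PubKeyAlgoRSAEncryptOnly == 2) // true: RFC 4880 value kept
	// CanEncrypt reflects the deprecation split: sign-only RSA cannot encrypt.
	fmt.Println(packet.PubKeyAlgoRSASignOnly.CanEncrypt()) // false
}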
// CanEncrypt returns true if it's possible to encrypt a message to a public
@ -500,19 +504,17 @@ func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
numBytes := (int(bitLength) + 7) / 8
mpi = make([]byte, numBytes)
_, err = readFull(r, mpi)
// According to RFC 4880 3.2. we should check that the MPI has no leading
// zeroes (at least when not an encrypted MPI?), but this implementation
// does generate leading zeroes, so we keep accepting them.
return
}
// writeMPI serializes a big integer to w.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
// Note that we can produce leading zeroes, in violation of RFC 4880 3.2.
// Implementations seem to be tolerant of them, and stripping them would
// make it complex to guarantee matching re-serialization.
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
if err == nil {
_, err = w.Write(mpiBytes)
@ -525,6 +527,18 @@ func writeBig(w io.Writer, i *big.Int) error {
return writeMPI(w, uint16(i.BitLen()), i.Bytes())
}
// padToKeySize left-pads a MPI with zeroes to match the length of the
// specified RSA public key.
func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
k := (pub.N.BitLen() + 7) / 8
if len(b) >= k {
return b
}
bb := make([]byte, k)
copy(bb[len(bb)-len(b):], b)
return bb
}
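padToKeySize exists because MPI encoding strips leading zero octets, while rsa.VerifyPKCS1v15 and rsa.DecryptPKCS1v15 expect exactly k = len(N) bytes; roughly one signature in 256 has a zero top byte and would otherwise fail. A minimal sketch of the repair (padLeft mirrors the helper above):

package main

import (
	"fmt"
	"math/big"
)

// padLeft left-pads b with zero bytes until it is k bytes long.
func padLeft(b []byte, k int) []byte {
	if len(b) >= k {
		return b
	}
	bb := make([]byte, k)
	copy(bb[len(bb)-len(b):], b)
	return bb
}

func main() {
	s, _ := new(big.Int).SetString("00ffee", 16) // the leading zero is lost by Bytes()
	wire := s.Bytes()
	fmt.Println(len(wire), len(padLeft(wire, 3))) // 2 3
}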
// CompressionAlgo represents the different compression algorithms
// supported by OpenPGP (except for BZIP2, which is not currently
// supported). See Section 9.3 of RFC 4880.


@ -64,14 +64,19 @@ func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateK
return pk
}
// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
// implements RSA or ECDSA.
func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
pk := new(PrivateKey)
// In general, the public Keys should be used as pointers. We still
// type-switch on the values, for backwards-compatibility.
switch pubkey := signer.Public().(type) {
case *rsa.PublicKey:
pk.PublicKey = *NewRSAPublicKey(currentTime, pubkey)
case rsa.PublicKey:
pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
case *ecdsa.PublicKey:
pk.PublicKey = *NewECDSAPublicKey(currentTime, pubkey)
case ecdsa.PublicKey:
pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
default:


@ -244,7 +244,12 @@ func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey
}
pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
// The bit length is 3 (for the 0x04 specifying an uncompressed key)
// plus two field elements (for x and y), which are rounded up to the
// nearest byte. See https://tools.ietf.org/html/rfc6637#section-6
fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7
pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes)
pk.setFingerPrintAndKeyId()
return pk
@ -515,7 +520,7 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes))
if err != nil {
return errors.SignatureError("RSA verification failure")
}
@ -566,7 +571,7 @@ func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey := pk.PublicKey.(*rsa.PublicKey)
if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil {
return errors.SignatureError("RSA verification failure")
}
return


@ -542,7 +542,7 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
r, s, err = ecdsa.Sign(config.Random(), pk, digest)
} else {
var b []byte
b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
if err == nil {
r, s, err = unwrapECDSASig(b)
}


@ -80,7 +80,7 @@ func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
// ImageData returns zero or more byte slices, each containing
// JPEG File Interchange Format (JFIF), for each photo in the
// user attribute packet.
func (uat *UserAttribute) ImageData() (imageData [][]byte) {
for _, sp := range uat.Contents {
if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {


@ -164,12 +164,12 @@ func hashToHashId(h crypto.Hash) uint8 {
return v
}
// writeAndSign writes the data as a payload package and, optionally, signs
// it. hints contains optional information, that is also encrypted,
// that aids the recipients in processing the message. The resulting
// WriteCloser must be closed after the contents of the file have been
// written. If config is nil, sensible defaults will be used.
func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
var signer *packet.PrivateKey
if signed != nil {
signKey, ok := signed.signingKey(config.Now())
@ -185,6 +185,83 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
var hash crypto.Hash
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
hash = h
break
}
}
// If the hash specified by config is a candidate, we'll use that.
if configuredHash := config.Hash(); configuredHash.Available() {
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
hash = h
break
}
}
}
if hash == 0 {
hashId := candidateHashes[0]
name, ok := s2k.HashIdToString(hashId)
if !ok {
name = "#" + strconv.Itoa(int(hashId))
}
return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
}
if signer != nil {
ops := &packet.OnePassSignature{
SigType: packet.SigTypeBinary,
Hash: hash,
PubKeyAlgo: signer.PubKeyAlgo,
KeyId: signer.KeyId,
IsLast: true,
}
if err := ops.Serialize(payload); err != nil {
return nil, err
}
}
if hints == nil {
hints = &FileHints{}
}
w := payload
if signer != nil {
// If we need to write a signature packet after the literal
// data then we need to stop literalData from closing
// encryptedData.
w = noOpCloser{w}
}
var epochSeconds uint32
if !hints.ModTime.IsZero() {
epochSeconds = uint32(hints.ModTime.Unix())
}
literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
if err != nil {
return nil, err
}
if signer != nil {
return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil
}
return literalData, nil
}
// Encrypt encrypts a message to a number of recipients and, optionally, signs
// it. hints contains optional information, that is also encrypted, that aids
// the recipients in processing the message. The resulting WriteCloser must
// be closed after the contents of the file have been written.
// If config is nil, sensible defaults will be used.
func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
if len(to) == 0 {
return nil, errors.InvalidArgumentError("no encryption recipient provided")
}
// These are the possible ciphers that we'll use for the message.
candidateCiphers := []uint8{
uint8(packet.CipherAES128),
@ -194,6 +271,7 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
@ -241,33 +319,6 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
symKey := make([]byte, cipher.KeySize())
if _, err := io.ReadFull(config.Random(), symKey); err != nil {
return nil, err
@ -279,49 +330,38 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
if err != nil {
return
}
return writeAndSign(payload, candidateHashes, signed, hints, config)
}
// Sign signs a message. The resulting WriteCloser must be closed after the
// contents of the file have been written. hints contains optional information
// that aids the recipients in processing the message.
// If config is nil, sensible defaults will be used.
func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) {
if signed == nil {
return nil, errors.InvalidArgumentError("no signer provided")
}
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
}
defaultHashes := candidateHashes[len(candidateHashes)-1:]
preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash
if len(preferredHashes) == 0 {
preferredHashes = defaultHashes
}
candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config)
}
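For orientation, a minimal caller of the reworked Sign path (a sketch; in practice the entity usually comes from ReadEntity rather than NewEntity):

package main

import (
	"bytes"
	"log"

	"golang.org/x/crypto/openpgp"
)

func main() {
	signer, err := openpgp.NewEntity("Example", "", "example@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	var out bytes.Buffer
	// Sign now funnels into writeAndSign, with the hash chosen from the
	// signer's stated preferences.
	w, err := openpgp.Sign(&out, signer, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // Close emits the trailing signature packet
		log.Fatal(err)
	}
	log.Printf("signed message: %d bytes", out.Len())
}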
// signatureWriter hashes the contents of a message while passing it along to


@ -617,7 +617,7 @@ func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) {
if _, err = w.Write(crlf); err != nil {
return n, err
}
n++
buf = buf[1:]
}
}


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
@ -17,44 +17,41 @@
package terminal // import "golang.org/x/crypto/ssh/terminal"
import (
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
// State contains the state of a terminal.
type State struct {
termios unix.Termios
}
// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
return err == nil
}
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
if err != nil {
return nil, err
}
oldState := State{termios: *termios}
// This attempts to replicate the behaviour documented for cfmakeraw in
// the termios(3) manpage.
termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
termios.Oflag &^= unix.OPOST
termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
termios.Cflag &^= unix.CSIZE | unix.PARENB
termios.Cflag |= unix.CS8
termios.Cc[unix.VMIN] = 1
termios.Cc[unix.VTIME] = 0
if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil {
return nil, err
}
@ -64,60 +61,54 @@ func MakeRaw(fd int) (*State, error) {
// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
if err != nil {
return nil, err
}
return &State{termios: *termios}, nil
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios)
}
// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
if err != nil {
return -1, -1, err
}
return int(ws.Col), int(ws.Row), nil
}
// passwordReader is an io.Reader that reads from a specific file descriptor.
type passwordReader int
func (r passwordReader) Read(buf []byte) (int, error) {
return unix.Read(int(r), buf)
}
// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
if err != nil {
return nil, err
}
newState := *termios
newState.Lflag &^= unix.ECHO
newState.Lflag |= unix.ICANON | unix.ISIG
newState.Iflag |= unix.ICRNL
if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil {
return nil, err
}
defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
return readPasswordLine(passwordReader(fd))
}
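A small usage sketch of the unix-backed API above, assuming fd 0 is an interactive terminal:

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fd := 0 // stdin
	if !terminal.IsTerminal(fd) {
		log.Fatal("stdin is not a terminal")
	}
	oldState, err := terminal.MakeRaw(fd)
	if err != nil {
		log.Fatal(err)
	}
	// Restore the saved termios on exit, or the shell stays in raw mode.
	defer terminal.Restore(fd, oldState)

	if w, h, err := terminal.GetSize(fd); err == nil {
		fmt.Printf("terminal is %dx%d\r\n", w, h)
	}
}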


@ -1,14 +1,12 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix
package terminal
import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TCGETS
const ioctlWriteTermios = unix.TCSETS


@ -21,7 +21,7 @@ import (
type State struct{}
// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
return false
}


@ -14,10 +14,10 @@ import (
// State contains the state of a terminal.
type State struct {
termios unix.Termios
}
// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
return err == nil
@ -75,47 +75,43 @@ func ReadPassword(fd int) ([]byte, error) {
// restored.
// see http://cr.illumos.org/~webrev/andy_js/1060/
func MakeRaw(fd int) (*State, error) {
termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
if err != nil {
return nil, err
}
oldState := State{termios: *termios}
termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
termios.Oflag &^= unix.OPOST
termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
termios.Cflag &^= unix.CSIZE | unix.PARENB
termios.Cflag |= unix.CS8
termios.Cc[unix.VMIN] = 1
termios.Cc[unix.VTIME] = 0
if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil {
return nil, err
}
return &oldState, nil
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, oldState *State) error {
return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios)
}
// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
if err != nil {
return nil, err
}
return &State{termios: *termios}, nil
}
// GetSize returns the dimensions of the given terminal.


@ -17,6 +17,8 @@
package terminal
import (
"os"
"golang.org/x/sys/windows"
)
@ -24,7 +26,7 @@ type State struct {
mode uint32
}
// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
var st uint32
err := windows.GetConsoleMode(windows.Handle(fd), &st)
@ -71,13 +73,6 @@ func GetSize(fd int) (width, height int, err error) {
return int(info.Size.X), int(info.Size.Y), nil
}
// passwordReader is an io.Reader that reads from a specific Windows HANDLE.
type passwordReader int
func (r passwordReader) Read(buf []byte) (int, error) {
return windows.Read(windows.Handle(r), buf)
}
// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
@ -94,9 +89,15 @@ func ReadPassword(fd int) ([]byte, error) {
return nil, err
}
defer windows.SetConsoleMode(windows.Handle(fd), old)
var h windows.Handle
p, _ := windows.GetCurrentProcess()
if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil {
return nil, err
}
f := os.NewFile(uintptr(h), "stdin")
defer f.Close()
return readPasswordLine(f)
}


@ -2,18 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp // import "golang.org/x/net/context/ctxhttp"
import (
"context"
"io"
"net/http"
"net/url"
"strings"
"golang.org/x/net/context"
)
// Do sends an HTTP request with the provided http.Client and returns


@ -1,147 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package ctxhttp // import "golang.org/x/net/context/ctxhttp"
import (
"io"
"net/http"
"net/url"
"strings"
"golang.org/x/net/context"
)
func nop() {}
var (
testHookContextDoneBeforeHeaders = nop
testHookDoReturned = nop
testHookDidBodyClose = nop
)
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
if client == nil {
client = http.DefaultClient
}
// TODO(djd): Respect any existing value of req.Cancel.
cancel := make(chan struct{})
req.Cancel = cancel
type responseAndError struct {
resp *http.Response
err error
}
result := make(chan responseAndError, 1)
// Make local copies of test hooks closed over by goroutines below.
// Prevents data races in tests.
testHookDoReturned := testHookDoReturned
testHookDidBodyClose := testHookDidBodyClose
go func() {
resp, err := client.Do(req)
testHookDoReturned()
result <- responseAndError{resp, err}
}()
var resp *http.Response
select {
case <-ctx.Done():
testHookContextDoneBeforeHeaders()
close(cancel)
// Clean up after the goroutine calling client.Do:
go func() {
if r := <-result; r.resp != nil {
testHookDidBodyClose()
r.resp.Body.Close()
}
}()
return nil, ctx.Err()
case r := <-result:
var err error
resp, err = r.resp, r.err
if err != nil {
return resp, err
}
}
c := make(chan struct{})
go func() {
select {
case <-ctx.Done():
close(cancel)
case <-c:
// The response's Body is closed.
}
}()
resp.Body = &notifyingReader{resp.Body, c}
return resp, nil
}
// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
return Do(ctx, client, req)
}
// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
// notifyingReader is an io.ReadCloser that closes the notify channel after
// Close is called or a Read fails on the underlying ReadCloser.
type notifyingReader struct {
io.ReadCloser
notify chan<- struct{}
}
func (r *notifyingReader) Read(p []byte) (int, error) {
n, err := r.ReadCloser.Read(p)
if err != nil && r.notify != nil {
close(r.notify)
r.notify = nil
}
return n, err
}
func (r *notifyingReader) Close() error {
err := r.ReadCloser.Close()
if r.notify != nil {
close(r.notify)
r.notify = nil
}
return err
}

712
vendor/golang.org/x/net/html/atom/gen.go generated vendored Normal file
View File

@ -0,0 +1,712 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
//go:generate go run gen.go
//go:generate go run gen.go -test
package main
import (
"bytes"
"flag"
"fmt"
"go/format"
"io/ioutil"
"math/rand"
"os"
"sort"
"strings"
)
// identifier converts s to a Go exported identifier.
// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
func identifier(s string) string {
b := make([]byte, 0, len(s))
cap := true
for _, c := range s {
if c == '-' {
cap = true
continue
}
if cap && 'a' <= c && c <= 'z' {
c -= 'a' - 'A'
}
cap = false
b = append(b, byte(c))
}
return string(b)
}
var test = flag.Bool("test", false, "generate table_test.go")
func genFile(name string, buf *bytes.Buffer) {
b, err := format.Source(buf.Bytes())
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if err := ioutil.WriteFile(name, b, 0644); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func main() {
flag.Parse()
var all []string
all = append(all, elements...)
all = append(all, attributes...)
all = append(all, eventHandlers...)
all = append(all, extra...)
sort.Strings(all)
// uniq - lists have dups
w := 0
for _, s := range all {
if w == 0 || all[w-1] != s {
all[w] = s
w++
}
}
all = all[:w]
if *test {
var buf bytes.Buffer
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n")
fmt.Fprintln(&buf, "package atom\n")
fmt.Fprintln(&buf, "var testAtomList = []string{")
for _, s := range all {
fmt.Fprintf(&buf, "\t%q,\n", s)
}
fmt.Fprintln(&buf, "}")
genFile("table_test.go", &buf)
return
}
// Find hash that minimizes table size.
var best *table
for i := 0; i < 1000000; i++ {
if best != nil && 1<<(best.k-1) < len(all) {
break
}
h := rand.Uint32()
for k := uint(0); k <= 16; k++ {
if best != nil && k >= best.k {
break
}
var t table
if t.init(h, k, all) {
best = &t
break
}
}
}
if best == nil {
fmt.Fprintf(os.Stderr, "failed to construct string table\n")
os.Exit(1)
}
// Lay out strings, using overlaps when possible.
layout := append([]string{}, all...)
// Remove strings that are substrings of other strings
for changed := true; changed; {
changed = false
for i, s := range layout {
if s == "" {
continue
}
for j, t := range layout {
if i != j && t != "" && strings.Contains(s, t) {
changed = true
layout[j] = ""
}
}
}
}
// Join strings where one suffix matches another prefix.
for {
// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
// maximizing overlap length k.
besti := -1
bestj := -1
bestk := 0
for i, s := range layout {
if s == "" {
continue
}
for j, t := range layout {
if i == j {
continue
}
for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
if s[len(s)-k:] == t[:k] {
besti = i
bestj = j
bestk = k
}
}
}
}
if bestk > 0 {
layout[besti] += layout[bestj][bestk:]
layout[bestj] = ""
continue
}
break
}
text := strings.Join(layout, "")
atom := map[string]uint32{}
for _, s := range all {
off := strings.Index(text, s)
if off < 0 {
panic("lost string " + s)
}
atom[s] = uint32(off<<8 | len(s))
}
var buf bytes.Buffer
// Generate the Go code.
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
fmt.Fprintln(&buf, "package atom\n\nconst (")
// compute max len
maxLen := 0
for _, s := range all {
if maxLen < len(s) {
maxLen = len(s)
}
fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
}
fmt.Fprintln(&buf, ")\n")
fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
for i, s := range best.tab {
if s == "" {
continue
}
fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
}
fmt.Fprintf(&buf, "}\n")
datasize := (1 << best.k) * 4
fmt.Fprintln(&buf, "const atomText =")
textsize := len(text)
for len(text) > 60 {
fmt.Fprintf(&buf, "\t%q +\n", text[:60])
text = text[60:]
}
fmt.Fprintf(&buf, "\t%q\n\n", text)
genFile("table.go", &buf)
fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
}
type byLen []string
func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byLen) Len() int { return len(x) }
// fnv computes the FNV hash with an arbitrary starting value h.
func fnv(h uint32, s string) uint32 {
for i := 0; i < len(s); i++ {
h ^= uint32(s[i])
h *= 16777619
}
return h
}
// A table represents an attempt at constructing the lookup table.
// The lookup table uses cuckoo hashing, meaning that each string
// can be found in one of two positions.
type table struct {
h0 uint32
k uint
mask uint32
tab []string
}
// hash returns the two hashes for s.
func (t *table) hash(s string) (h1, h2 uint32) {
h := fnv(t.h0, s)
h1 = h & t.mask
h2 = (h >> 16) & t.mask
return
}
// init initializes the table with the given parameters.
// h0 is the initial hash value,
// k is the number of bits of hash value to use, and
// x is the list of strings to store in the table.
// init returns false if the table cannot be constructed.
func (t *table) init(h0 uint32, k uint, x []string) bool {
t.h0 = h0
t.k = k
t.tab = make([]string, 1<<k)
t.mask = 1<<k - 1
for _, s := range x {
if !t.insert(s) {
return false
}
}
return true
}
// insert inserts s in the table.
func (t *table) insert(s string) bool {
h1, h2 := t.hash(s)
if t.tab[h1] == "" {
t.tab[h1] = s
return true
}
if t.tab[h2] == "" {
t.tab[h2] = s
return true
}
if t.push(h1, 0) {
t.tab[h1] = s
return true
}
if t.push(h2, 0) {
t.tab[h2] = s
return true
}
return false
}
// push attempts to push aside the entry in slot i.
func (t *table) push(i uint32, depth int) bool {
if depth > len(t.tab) {
return false
}
s := t.tab[i]
h1, h2 := t.hash(s)
j := h1 + h2 - i
if t.tab[j] != "" && !t.push(j, depth+1) {
return false
}
t.tab[j] = s
return true
}
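The cuckoo-hashing comment implies the generated reader probes at most two slots per string. A toy sketch of that lookup under the same fnv hash (lookup is a hypothetical name; the generated table.go works over a packed string instead of a []string):

package main

import "fmt"

// fnv matches the generator's hash: FNV-style, seeded with h.
func fnv(h uint32, s string) uint32 {
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619
	}
	return h
}

// lookup probes only the two slots cuckoo hashing permits for s.
func lookup(tab []string, mask, h0 uint32, s string) (string, bool) {
	h := fnv(h0, s)
	for _, i := range [2]uint32{h & mask, (h >> 16) & mask} {
		if tab[i] == s {
			return tab[i], true
		}
	}
	return "", false
}

func main() {
	tab := make([]string, 4) // toy 4-slot table, mask 3
	h0 := uint32(42)
	tab[fnv(h0, "div")&3] = "div"
	fmt.Println(lookup(tab, 3, h0, "div")) // div true
}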
// The lists of element names and attribute keys were taken from
// https://html.spec.whatwg.org/multipage/indices.html#index
// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
// "command", "keygen" and "menuitem" have been removed from the spec,
// but are kept here for backwards compatibility.
var elements = []string{
"a",
"abbr",
"address",
"area",
"article",
"aside",
"audio",
"b",
"base",
"bdi",
"bdo",
"blockquote",
"body",
"br",
"button",
"canvas",
"caption",
"cite",
"code",
"col",
"colgroup",
"command",
"data",
"datalist",
"dd",
"del",
"details",
"dfn",
"dialog",
"div",
"dl",
"dt",
"em",
"embed",
"fieldset",
"figcaption",
"figure",
"footer",
"form",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"head",
"header",
"hgroup",
"hr",
"html",
"i",
"iframe",
"img",
"input",
"ins",
"kbd",
"keygen",
"label",
"legend",
"li",
"link",
"main",
"map",
"mark",
"menu",
"menuitem",
"meta",
"meter",
"nav",
"noscript",
"object",
"ol",
"optgroup",
"option",
"output",
"p",
"param",
"picture",
"pre",
"progress",
"q",
"rp",
"rt",
"ruby",
"s",
"samp",
"script",
"section",
"select",
"slot",
"small",
"source",
"span",
"strong",
"style",
"sub",
"summary",
"sup",
"table",
"tbody",
"td",
"template",
"textarea",
"tfoot",
"th",
"thead",
"time",
"title",
"tr",
"track",
"u",
"ul",
"var",
"video",
"wbr",
}
// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
//
// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
// but are kept here for backwards compatibility.
var attributes = []string{
"abbr",
"accept",
"accept-charset",
"accesskey",
"action",
"allowfullscreen",
"allowpaymentrequest",
"allowusermedia",
"alt",
"as",
"async",
"autocomplete",
"autofocus",
"autoplay",
"challenge",
"charset",
"checked",
"cite",
"class",
"color",
"cols",
"colspan",
"command",
"content",
"contenteditable",
"contextmenu",
"controls",
"coords",
"crossorigin",
"data",
"datetime",
"default",
"defer",
"dir",
"dirname",
"disabled",
"download",
"draggable",
"dropzone",
"enctype",
"for",
"form",
"formaction",
"formenctype",
"formmethod",
"formnovalidate",
"formtarget",
"headers",
"height",
"hidden",
"high",
"href",
"hreflang",
"http-equiv",
"icon",
"id",
"inputmode",
"integrity",
"is",
"ismap",
"itemid",
"itemprop",
"itemref",
"itemscope",
"itemtype",
"keytype",
"kind",
"label",
"lang",
"list",
"loop",
"low",
"manifest",
"max",
"maxlength",
"media",
"mediagroup",
"method",
"min",
"minlength",
"multiple",
"muted",
"name",
"nomodule",
"nonce",
"novalidate",
"open",
"optimum",
"pattern",
"ping",
"placeholder",
"playsinline",
"poster",
"preload",
"radiogroup",
"readonly",
"referrerpolicy",
"rel",
"required",
"reversed",
"rows",
"rowspan",
"sandbox",
"spellcheck",
"scope",
"scoped",
"seamless",
"selected",
"shape",
"size",
"sizes",
"sortable",
"sorted",
"slot",
"span",
"spellcheck",
"src",
"srcdoc",
"srclang",
"srcset",
"start",
"step",
"style",
"tabindex",
"target",
"title",
"translate",
"type",
"typemustmatch",
"updateviacache",
"usemap",
"value",
"width",
"workertype",
"wrap",
}
// "onautocomplete", "onautocompleteerror", "onmousewheel",
// "onshow" and "onsort" have been removed from the spec,
// but are kept here for backwards compatibility.
var eventHandlers = []string{
"onabort",
"onautocomplete",
"onautocompleteerror",
"onauxclick",
"onafterprint",
"onbeforeprint",
"onbeforeunload",
"onblur",
"oncancel",
"oncanplay",
"oncanplaythrough",
"onchange",
"onclick",
"onclose",
"oncontextmenu",
"oncopy",
"oncuechange",
"oncut",
"ondblclick",
"ondrag",
"ondragend",
"ondragenter",
"ondragexit",
"ondragleave",
"ondragover",
"ondragstart",
"ondrop",
"ondurationchange",
"onemptied",
"onended",
"onerror",
"onfocus",
"onhashchange",
"oninput",
"oninvalid",
"onkeydown",
"onkeypress",
"onkeyup",
"onlanguagechange",
"onload",
"onloadeddata",
"onloadedmetadata",
"onloadend",
"onloadstart",
"onmessage",
"onmessageerror",
"onmousedown",
"onmouseenter",
"onmouseleave",
"onmousemove",
"onmouseout",
"onmouseover",
"onmouseup",
"onmousewheel",
"onwheel",
"onoffline",
"ononline",
"onpagehide",
"onpageshow",
"onpaste",
"onpause",
"onplay",
"onplaying",
"onpopstate",
"onprogress",
"onratechange",
"onreset",
"onresize",
"onrejectionhandled",
"onscroll",
"onsecuritypolicyviolation",
"onseeked",
"onseeking",
"onselect",
"onshow",
"onsort",
"onstalled",
"onstorage",
"onsubmit",
"onsuspend",
"ontimeupdate",
"ontoggle",
"onunhandledrejection",
"onunload",
"onvolumechange",
"onwaiting",
}
// extra are ad-hoc values not covered by any of the lists above.
var extra = []string{
"acronym",
"align",
"annotation",
"annotation-xml",
"applet",
"basefont",
"bgsound",
"big",
"blink",
"center",
"color",
"desc",
"face",
"font",
"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
"foreignobject",
"frame",
"frameset",
"image",
"isindex",
"listing",
"malignmark",
"marquee",
"math",
"mglyph",
"mi",
"mn",
"mo",
"ms",
"mtext",
"nobr",
"noembed",
"noframes",
"plaintext",
"prompt",
"public",
"rb",
"rtc",
"spacer",
"strike",
"svg",
"system",
"tt",
"xmp",
}


@ -97,8 +97,16 @@ func isSpecialElement(element *Node) bool {
switch element.Namespace {
case "", "html":
return isSpecialElementMap[element.Data]
case "math":
switch element.Data {
case "mi", "mo", "mn", "ms", "mtext", "annotation-xml":
return true
}
case "svg":
return element.Data == "foreignObject"
switch element.Data {
case "foreignObject", "desc", "title":
return true
}
}
return false
}

132 vendor/golang.org/x/net/html/parse.go generated vendored

@ -209,27 +209,6 @@ loop:
p.oe = p.oe[:i+1]
}
// generateAllImpliedEndTags pops nodes off the stack of open elements as long as
// the top node has a tag name of caption, colgroup, dd, div, dt, li, optgroup, option, p, rb,
// rp, rt, rtc, span, tbody, td, tfoot, th, thead or tr.
func (p *parser) generateAllImpliedEndTags() {
var i int
for i = len(p.oe) - 1; i >= 0; i-- {
n := p.oe[i]
if n.Type == ElementNode {
switch n.DataAtom {
// TODO: remove this divergence from the HTML5 spec
case a.Caption, a.Colgroup, a.Dd, a.Div, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb,
a.Rp, a.Rt, a.Rtc, a.Span, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
continue
}
}
break
}
p.oe = p.oe[:i+1]
}
// addChild adds a child node n to the top element, and pushes n onto the stack
// of open elements if it is an element node.
func (p *parser) addChild(n *Node) {
@ -276,7 +255,7 @@ func (p *parser) fosterParent(n *Node) {
}
}
if template != nil && (table == nil || j > i) {
template.AppendChild(n)
return
}
@ -491,6 +470,10 @@ func (p *parser) resetInsertionMode() {
case a.Table:
p.im = inTableIM
case a.Template:
// TODO: remove this divergence from the HTML5 spec.
if n.Namespace != "" {
continue
}
p.im = p.templateStack.top()
case a.Head:
// TODO: remove this divergence from the HTML5 spec.
@ -679,11 +662,16 @@ func inHeadIM(p *parser) bool {
if !p.oe.contains(a.Template) {
return true
}
// TODO: remove this divergence from the HTML5 spec.
//
// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
p.generateImpliedEndTags()
for i := len(p.oe) - 1; i >= 0; i-- {
if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template {
p.oe = p.oe[:i]
break
}
}
p.clearActiveFormattingElements()
p.templateStack.pop()
p.resetInsertionMode()
@ -860,9 +848,13 @@ func inBodyIM(p *parser) bool {
// The newline, if any, will be dealt with by the TextToken case.
p.framesetOK = false
case a.Form:
if p.form != nil && !p.oe.contains(a.Template) {
// Ignore the token
return true
}
p.popUntil(buttonScope, a.P)
p.addElement()
if !p.oe.contains(a.Template) {
p.form = p.top()
}
case a.Li:
@ -996,6 +988,14 @@ func inBodyIM(p *parser) bool {
p.acknowledgeSelfClosingTag()
p.popUntil(buttonScope, a.P)
p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
if p.form == nil {
// NOTE: The 'isindex' element has been removed,
// and the 'template' element has not been designed to be
// collaborative with the index element.
//
// Ignore the token.
return true
}
if action != "" {
p.form.Attr = []Attribute{{Key: "action", Val: action}}
}
@ -1070,13 +1070,7 @@ func inBodyIM(p *parser) bool {
p.acknowledgeSelfClosingTag()
}
return true
case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
// Ignore the token.
default:
p.reconstructActiveFormattingElements()
@ -1098,12 +1092,13 @@ func inBodyIM(p *parser) bool {
p.popUntil(defaultScope, p.tok.DataAtom)
case a.Form:
if p.oe.contains(a.Template) {
i := p.indexOfElementInScope(defaultScope, a.Form)
if i == -1 {
// Ignore the token.
return true
}
p.generateImpliedEndTags()
if p.oe[i].DataAtom != a.Form {
// Ignore the token.
return true
}
@ -1269,12 +1264,6 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
switch commonAncestor.DataAtom {
case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
p.fosterParent(lastNode)
default:
commonAncestor.AppendChild(lastNode)
}
@ -1346,9 +1335,6 @@ func textIM(p *parser) bool {
// Section 12.2.6.4.9.
func inTableIM(p *parser) bool {
switch p.tok.Type {
case TextToken:
p.tok.Data = strings.Replace(p.tok.Data, "\x00", "", -1)
switch p.oe.top().DataAtom {
@ -1443,6 +1429,8 @@ func inTableIM(p *parser) bool {
case DoctypeToken:
// Ignore the token.
return true
case ErrorToken:
return inBodyIM(p)
}
p.fosterParenting = true
@ -1545,6 +1533,8 @@ func inColumnGroupIM(p *parser) bool {
case a.Template:
return inHeadIM(p)
}
case ErrorToken:
return inBodyIM(p)
}
if p.oe.top().DataAtom != a.Colgroup {
return true
@ -1709,9 +1699,6 @@ func inCellIM(p *parser) bool {
// Section 12.2.6.4.16.
func inSelectIM(p *parser) bool {
switch p.tok.Type {
case TextToken:
p.addText(strings.Replace(p.tok.Data, "\x00", "", -1))
case StartTagToken:
@ -1775,6 +1762,8 @@ func inSelectIM(p *parser) bool {
case DoctypeToken:
// Ignore the token.
return true
case ErrorToken:
return inBodyIM(p)
}
return true
@ -1841,15 +1830,26 @@ func inTemplateIM(p *parser) bool {
// Ignore the token.
return true
}
case ErrorToken:
if !p.oe.contains(a.Template) {
// Ignore the token.
return true
}
// TODO: remove this divergence from the HTML5 spec.
//
// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
p.generateImpliedEndTags()
for i := len(p.oe) - 1; i >= 0; i-- {
if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template {
p.oe = p.oe[:i]
break
}
}
p.clearActiveFormattingElements()
p.templateStack.pop()
p.resetInsertionMode()
return false
}
return false
}
@ -1923,11 +1923,6 @@ func inFramesetIM(p *parser) bool {
p.acknowledgeSelfClosingTag()
case a.Noframes:
return inHeadIM(p)
}
case EndTagToken:
switch p.tok.DataAtom {
@ -2220,6 +2215,15 @@ func (p *parser) parse() error {
}
// Parse returns the parse tree for the HTML from the given Reader.
//
// It implements the HTML5 parsing algorithm
// (https://html.spec.whatwg.org/multipage/syntax.html#tree-construction),
// which is very complicated. The resultant tree can contain implicitly created
// nodes that have no explicit <tag> listed in r's data, and nodes' parents can
// differ from the nesting implied by a naive processing of start and end
// <tag>s. Conversely, explicit <tag>s in r's data can be silently dropped,
// with no corresponding node in the resulting tree.
//
// The input is assumed to be UTF-8 encoded.
func Parse(r io.Reader) (*Node, error) {
p := &parser{
@ -2241,6 +2245,8 @@ func Parse(r io.Reader) (*Node, error) {
// ParseFragment parses a fragment of HTML and returns the nodes that were
// found. If the fragment is the InnerHTML for an existing element, pass that
// element in context.
//
// It has the same intricacies as Parse.
func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
contextTag := ""
if context != nil {
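The expanded Parse documentation above is easy to verify: implicitly created html/head/body nodes appear, and a stray end tag silently vanishes. A short sketch:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// No <html>, <head> or <body> in the input, plus a stray </p>.
	doc, err := html.Parse(strings.NewReader("<p>hi</p></p>"))
	if err != nil {
		panic(err)
	}
	var walk func(n *html.Node, depth int)
	walk = func(n *html.Node, depth int) {
		if n.Type == html.ElementNode {
			fmt.Printf("%s<%s>\n", strings.Repeat("  ", depth), n.Data)
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c, depth+1)
		}
	}
	walk(doc, 0) // html, head, body, p appear; the stray </p> is dropped
}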


@ -14,21 +14,6 @@ import (
"strings"
)
// SniffedContentType reports whether ct is a Content-Type that is known
// to cause client-side content sniffing.
//
// This provides just a partial implementation of mime.ParseMediaType
// with the assumption that the Content-Type is not attacker controlled.
func SniffedContentType(ct string) bool {
if i := strings.Index(ct, ";"); i != -1 {
ct = ct[:i]
}
ct = strings.ToLower(strings.TrimSpace(ct))
return ct == "text/plain" || ct == "application/octet-stream" ||
ct == "application/unknown" || ct == "unknown/unknown" || ct == "*/*" ||
!strings.Contains(ct, "/")
}
// ValidTrailerHeader reports whether name is a valid header field name to appear
// in trailers.
// See RFC 7230, Section 4.1.2


@ -52,9 +52,31 @@ const (
noDialOnMiss = false
)
// shouldTraceGetConn reports whether getClientConn should call any
// ClientTrace.GetConn hook associated with the http.Request.
//
// This complexity is needed to avoid double calls of the GetConn hook
// during the back-and-forth between net/http and x/net/http2 (when the
// net/http.Transport is upgraded to also speak http2), as well as support
// the case where x/net/http2 is being used directly.
func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
// If our Transport wasn't made via ConfigureTransport, always
// trace the GetConn hook if provided, because that means the
// http2 package is being used directly and it's the one
// dialing, as opposed to net/http.
if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
return true
}
// Otherwise, only use the GetConn hook if this connection has
// been used previously for other requests. For fresh
// connections, the net/http package does the dialing.
return !st.freshConn
}
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
if isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection.
traceGetConn(req, addr)
const singleUse = true
cc, err := p.t.dialClientConn(addr, singleUse)
if err != nil {
@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
}
p.mu.Lock()
for _, cc := range p.conns[addr] {
if st := cc.idleState(); st.canTakeNewRequest {
if p.shouldTraceGetConn(st) {
traceGetConn(req, addr)
}
p.mu.Unlock()
return cc, nil
}
@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
p.mu.Unlock()
return nil, ErrNoCachedConn
}
traceGetConn(req, addr)
call := p.getStartDialLocked(addr)
p.mu.Unlock()
<-call.done


@ -1,80 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.6
package http2
import (
"crypto/tls"
"fmt"
"net/http"
)
func configureTransport(t1 *http.Transport) (*Transport, error) {
connPool := new(clientConnPool)
t2 := &Transport{
ConnPool: noDialClientConnPool{connPool},
t1: t1,
}
connPool.t = t2
if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
return nil, err
}
if t1.TLSClientConfig == nil {
t1.TLSClientConfig = new(tls.Config)
}
if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
}
if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
}
upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
addr := authorityAddr("https", authority)
if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
go c.Close()
return erringRoundTripper{err}
} else if !used {
// Turns out we don't need this c.
// For example, two goroutines made requests to the same host
// at the same time, both kicking off TCP dials. (since protocol
// was unknown)
go c.Close()
}
return t2
}
if m := t1.TLSNextProto; len(m) == 0 {
t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
"h2": upgradeFn,
}
} else {
m["h2"] = upgradeFn
}
return t2, nil
}
// registerHTTPSProtocol calls Transport.RegisterProtocol but converts
// panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("%v", e)
}
}()
t.RegisterProtocol("https", rt)
return nil
}
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there is already a cached connection to the host.
type noDialH2RoundTripper struct{ t *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
res, err := rt.t.RoundTrip(req)
if isNoCachedConnError(err) {
return nil, http.ErrSkipAltProtocol
}
return res, err
}
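For context, this wiring is reached through the exported http2.ConfigureTransport entry point (which survives this revendor even though the file is deleted); a minimal caller-side sketch of opting an existing http.Transport into h2:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{}
	// Adds "h2" to the TLS NextProtos and registers the ALPN upgrade
	// function, as shown in the body above.
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: t1}
	resp, err := client.Get("https://example.com")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}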


@ -41,10 +41,10 @@ func (f *flow) take(n int32) {
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
remain := (1<<31 - 1) - f.n
if n > remain {
return false
sum := f.n + n
if (sum > n) == (f.n > 0) {
f.n = sum
return true
}
f.n += n
return true
return false
}
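The rewrite replaces the explicit headroom check with a sign-comparison overflow test: the sum overflows int32 exactly when its relation to n disagrees with the sign of the old window. A standalone re-implementation (illustrative, not the vendored type) shows the boundary behaviour:

package main

import (
	"fmt"
	"math"
)

// add mirrors the updated flow.add: it reports whether n can be added
// to window without exceeding 2^31-1, returning the new window on success.
func add(window, n int32) (int32, bool) {
	sum := window + n // wraps on overflow; Go defines signed wraparound
	if (sum > n) == (window > 0) {
		return sum, true
	}
	return window, false
}

func main() {
	w := int32(math.MaxInt32 - 1)
	_, ok := add(w, 2) // would exceed 2^31-1, so it is rejected
	fmt.Println(ok)    // false
	w2, ok := add(w, 1)
	fmt.Println(w2 == math.MaxInt32, ok) // true true
}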


@ -733,32 +733,67 @@ func (f *SettingsFrame) IsAck() bool {
return f.FrameHeader.Flags.Has(FlagSettingsAck)
}
func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) {
f.checkValid()
buf := f.p
for len(buf) > 0 {
settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
if settingID == s {
return binary.BigEndian.Uint32(buf[2:6]), true
for i := 0; i < f.NumSettings(); i++ {
if s := f.Setting(i); s.ID == id {
return s.Val, true
}
buf = buf[6:]
}
return 0, false
}
// Setting returns the setting from the frame at the given 0-based index.
// The index must be >= 0 and less than f.NumSettings().
func (f *SettingsFrame) Setting(i int) Setting {
buf := f.p
return Setting{
ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
}
}
func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 }
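The index arithmetic works because each setting occupies a fixed six bytes on the wire (16-bit ID, 32-bit value, both big-endian, per RFC 7540). A small decoding sketch with made-up sample bytes:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Two settings: SETTINGS_MAX_CONCURRENT_STREAMS (0x3) = 100,
	// SETTINGS_INITIAL_WINDOW_SIZE (0x4) = 65535.
	payload := []byte{
		0x00, 0x03, 0x00, 0x00, 0x00, 0x64,
		0x00, 0x04, 0x00, 0x00, 0xff, 0xff,
	}
	for i := 0; i < len(payload)/6; i++ {
		id := binary.BigEndian.Uint16(payload[i*6 : i*6+2])
		val := binary.BigEndian.Uint32(payload[i*6+2 : i*6+6])
		fmt.Printf("setting 0x%x = %d\n", id, val)
	}
}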
// HasDuplicates reports whether f contains any duplicate setting IDs.
func (f *SettingsFrame) HasDuplicates() bool {
num := f.NumSettings()
if num == 0 {
return false
}
// If it's small enough (the common case), just do the n^2
// thing and avoid a map allocation.
if num < 10 {
for i := 0; i < num; i++ {
idi := f.Setting(i).ID
for j := i + 1; j < num; j++ {
idj := f.Setting(j).ID
if idi == idj {
return true
}
}
}
return false
}
seen := map[SettingID]bool{}
for i := 0; i < num; i++ {
id := f.Setting(i).ID
if seen[id] {
return true
}
seen[id] = true
}
return false
}
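The size cutoff trades a map allocation against a quadratic scan; the same pattern in isolation (the threshold mirrors the code above, the helper name is made up):

package main

import "fmt"

// hasDuplicates reports whether ids contains a repeated value, scanning
// quadratically for small inputs to avoid allocating a map.
func hasDuplicates(ids []uint16) bool {
	if len(ids) < 10 {
		for i := 0; i < len(ids); i++ {
			for j := i + 1; j < len(ids); j++ {
				if ids[i] == ids[j] {
					return true
				}
			}
		}
		return false
	}
	seen := make(map[uint16]bool, len(ids))
	for _, id := range ids {
		if seen[id] {
			return true
		}
		seen[id] = true
	}
	return false
}

func main() {
	fmt.Println(hasDuplicates([]uint16{1, 2, 3}))    // false
	fmt.Println(hasDuplicates([]uint16{1, 2, 2, 3})) // true
}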
// ForeachSetting runs fn for each setting.
// It stops and returns the first error.
func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
f.checkValid()
buf := f.p
for len(buf) > 0 {
if err := fn(Setting{
SettingID(binary.BigEndian.Uint16(buf[:2])),
binary.BigEndian.Uint32(buf[2:6]),
}); err != nil {
for i := 0; i < f.NumSettings(); i++ {
if err := fn(f.Setting(i)); err != nil {
return err
}
buf = buf[6:]
}
return nil
}
@ -1442,7 +1477,7 @@ func (fr *Framer) maxHeaderStringLen() int {
}
// readMetaFrame returns 0 or more CONTINUATION frames from fr and
// merge them into into the provided hf and returns a MetaHeadersFrame
// merge them into the provided hf and returns a MetaHeadersFrame
// with the decoded hpack values.
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
if fr.AllowIllegalReads {

29
vendor/golang.org/x/net/http2/go111.go generated vendored Normal file

@ -0,0 +1,29 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.11
package http2
import (
"net/http/httptrace"
"net/textproto"
)
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
}
return nil
}
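These shims surface httptrace hooks that were added in Go 1.11; a client-side sketch of registering them (the URL and log text are illustrative):

package main

import (
	"log"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	trace := &httptrace.ClientTrace{
		WroteHeaderField: func(key string, values []string) {
			log.Printf("wrote header %s: %v", key, values)
		},
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			log.Printf("got %d interim response", code)
			return nil // returning an error aborts the request
		},
	}
	req, _ := http.NewRequest("GET", "https://example.com", nil)
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	if resp, err := http.DefaultClient.Do(req); err == nil {
		resp.Body.Close()
	}
}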


@ -1,16 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.6
package http2
import (
"net/http"
"time"
)
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return t1.ExpectContinueTimeout
}

106
vendor/golang.org/x/net/http2/go17.go generated vendored

@ -1,106 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package http2
import (
"context"
"net"
"net/http"
"net/http/httptrace"
"time"
)
type contextContext interface {
context.Context
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
ctx, cancel = context.WithCancel(context.Background())
ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
if hs := opts.baseConfig(); hs != nil {
ctx = context.WithValue(ctx, http.ServerContextKey, hs)
}
return
}
func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
return context.WithCancel(ctx)
}
func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
return req.WithContext(ctx)
}
type clientTrace httptrace.ClientTrace
func reqContext(r *http.Request) context.Context { return r.Context() }
func (t *Transport) idleConnTimeout() time.Duration {
if t.t1 != nil {
return t.t1.IdleConnTimeout
}
return 0
}
func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
func traceGotConn(req *http.Request, cc *ClientConn) {
trace := httptrace.ContextClientTrace(req.Context())
if trace == nil || trace.GotConn == nil {
return
}
ci := httptrace.GotConnInfo{Conn: cc.tconn}
cc.mu.Lock()
ci.Reused = cc.nextStreamID > 1
ci.WasIdle = len(cc.streams) == 0 && ci.Reused
if ci.WasIdle && !cc.lastActive.IsZero() {
ci.IdleTime = time.Now().Sub(cc.lastActive)
}
cc.mu.Unlock()
trace.GotConn(ci)
}
func traceWroteHeaders(trace *clientTrace) {
if trace != nil && trace.WroteHeaders != nil {
trace.WroteHeaders()
}
}
func traceGot100Continue(trace *clientTrace) {
if trace != nil && trace.Got100Continue != nil {
trace.Got100Continue()
}
}
func traceWait100Continue(trace *clientTrace) {
if trace != nil && trace.Wait100Continue != nil {
trace.Wait100Continue()
}
}
func traceWroteRequest(trace *clientTrace, err error) {
if trace != nil && trace.WroteRequest != nil {
trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
}
}
func traceFirstResponseByte(trace *clientTrace) {
if trace != nil && trace.GotFirstResponseByte != nil {
trace.GotFirstResponseByte()
}
}
func requestTrace(req *http.Request) *clientTrace {
trace := httptrace.ContextClientTrace(req.Context())
return (*clientTrace)(trace)
}
// Ping sends a PING frame to the server and waits for the ack.
func (cc *ClientConn) Ping(ctx context.Context) error {
return cc.ping(ctx)
}


@ -1,36 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,!go1.8
package http2
import "crypto/tls"
// temporary copy of Go 1.7's private tls.Config.clone:
func cloneTLSConfig(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
Renegotiation: c.Renegotiation,
}
}


@ -1,56 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.8
package http2
import (
"crypto/tls"
"io"
"net/http"
)
func cloneTLSConfig(c *tls.Config) *tls.Config {
c2 := c.Clone()
c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
return c2
}
var _ http.Pusher = (*responseWriter)(nil)
// Push implements http.Pusher.
func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
internalOpts := pushOptions{}
if opts != nil {
internalOpts.Method = opts.Method
internalOpts.Header = opts.Header
}
return w.push(target, internalOpts)
}
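Server handlers reach this through the standard http.Pusher type assertion; a typical sketch (the asset path and certificate file names are placeholders):

package main

import (
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Push is HTTP/2-only, so probe for the optional interface first.
	if pusher, ok := w.(http.Pusher); ok {
		if err := pusher.Push("/static/app.css", nil); err != nil {
			log.Printf("push failed: %v", err)
		}
	}
	w.Write([]byte("<html>...</html>"))
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}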
func configureServer18(h1 *http.Server, h2 *Server) error {
if h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
} else {
h2.IdleTimeout = h1.ReadTimeout
}
}
return nil
}
func shouldLogPanic(panicValue interface{}) bool {
return panicValue != nil && panicValue != http.ErrAbortHandler
}
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
return req.GetBody
}
func reqBodyIsNoBody(body io.ReadCloser) bool {
return body == http.NoBody
}
func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only


@ -1,16 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package http2
import (
"net/http"
)
func configureServer19(s *http.Server, conf *Server) error {
s.RegisterOnShutdown(conf.state.startGracefulShutdown)
return nil
}


@ -7,15 +7,21 @@ package http2
import (
"net/http"
"strings"
"sync"
)
var (
commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
commonBuildOnce sync.Once
commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
)
func init() {
for _, v := range []string{
func buildCommonHeaderMapsOnce() {
commonBuildOnce.Do(buildCommonHeaderMaps)
}
func buildCommonHeaderMaps() {
common := []string{
"accept",
"accept-charset",
"accept-encoding",
@ -63,7 +69,10 @@ func init() {
"vary",
"via",
"www-authenticate",
} {
}
commonLowerHeader = make(map[string]string, len(common))
commonCanonHeader = make(map[string]string, len(common))
for _, v := range common {
chk := http.CanonicalHeaderKey(v)
commonLowerHeader[chk] = v
commonCanonHeader[v] = chk
@ -71,6 +80,7 @@ func init() {
}
func lowerHeader(v string) string {
buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s
}
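This hunk converts eager init()-time map construction into lazy sync.Once initialization, so programs that never touch these tables pay nothing at startup; the pattern in miniature (names and contents are illustrative):

package main

import (
	"fmt"
	"strings"
	"sync"
)

var (
	buildOnce sync.Once
	lowerMap  map[string]string
)

// lookupLower builds the table on first use rather than at package init.
func lookupLower(k string) (string, bool) {
	buildOnce.Do(func() {
		keys := []string{"Accept", "Content-Type", "User-Agent"}
		lowerMap = make(map[string]string, len(keys))
		for _, key := range keys {
			lowerMap[key] = strings.ToLower(key)
		}
	})
	v, ok := lowerMap[k]
	return v, ok
}

func main() {
	fmt.Println(lookupLower("Accept")) // accept true
}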


@ -92,6 +92,8 @@ type Decoder struct {
// saveBuf is previous data passed to Write which we weren't able
// to fully parse before. Unlike buf, we own this data.
saveBuf bytes.Buffer
firstField bool // processing the first field of the header block
}
// NewDecoder returns a new decoder with the provided maximum dynamic
@ -101,6 +103,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
d := &Decoder{
emit: emitFunc,
emitEnabled: true,
firstField: true,
}
d.dynTab.table.init()
d.dynTab.allowedMaxSize = maxDynamicTableSize
@ -226,11 +229,15 @@ func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
return hf, nil
}
// Close declares that the decoding is complete and resets the Decoder
// to be reused again for a new header block. If there is any remaining
// data in the decoder's buffer, Close returns an error.
func (d *Decoder) Close() error {
if d.saveBuf.Len() > 0 {
d.saveBuf.Reset()
return DecodingError{errors.New("truncated headers")}
}
d.firstField = true
return nil
}
@ -266,6 +273,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) {
d.saveBuf.Write(d.buf)
return len(p), nil
}
d.firstField = false
if err != nil {
break
}
@ -391,7 +399,7 @@ func (d *Decoder) callEmit(hf HeaderField) error {
func (d *Decoder) parseDynamicTableSizeUpdate() error {
// RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
// beginning of the first header block following the change to the dynamic table size.
if d.dynTab.size > 0 {
if !d.firstField && d.dynTab.size > 0 {
return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
}
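For reference, the decoder's public surface around these changes; the sketch round-trips a header block through hpack's own Encoder so the input is guaranteed to be valid HPACK:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode a header block, then feed it back through a Decoder.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "demo"})

	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		panic(err)
	}
	// Close resets the decoder (including the firstField state above)
	// so it can be reused for the next header block.
	if err := dec.Close(); err != nil {
		panic(err)
	}
}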


@ -47,6 +47,7 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
rootHuffmanNode := getRootHuffmanNode()
n := rootHuffmanNode
// cur is the bit buffer that has not been fed into n.
// cbits is the number of low order bits in cur that are valid.
@ -106,7 +107,7 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
type node struct {
// children is non-nil for internal nodes
children []*node
children *[256]*node
// The following are only valid if children is nil:
codeLen uint8 // number of bits that led to the output of sym
@ -114,22 +115,31 @@ type node struct {
}
func newInternalNode() *node {
return &node{children: make([]*node, 256)}
return &node{children: new([256]*node)}
}
var rootHuffmanNode = newInternalNode()
var (
buildRootOnce sync.Once
lazyRootHuffmanNode *node
)
func init() {
func getRootHuffmanNode() *node {
buildRootOnce.Do(buildRootHuffmanNode)
return lazyRootHuffmanNode
}
func buildRootHuffmanNode() {
if len(huffmanCodes) != 256 {
panic("unexpected size")
}
lazyRootHuffmanNode = newInternalNode()
for i, code := range huffmanCodes {
addDecoderNode(byte(i), code, huffmanCodeLen[i])
}
}
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
cur := rootHuffmanNode
cur := lazyRootHuffmanNode
for codeLen > 8 {
codeLen -= 8
i := uint8(code >> codeLen)


@ -201,19 +201,12 @@ func validWireHeaderFieldName(v string) bool {
return true
}
var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
func init() {
for i := 100; i <= 999; i++ {
if v := http.StatusText(i); v != "" {
httpCodeStringCommon[i] = strconv.Itoa(i)
}
}
}
func httpCodeString(code int) string {
if s, ok := httpCodeStringCommon[code]; ok {
return s
switch code {
case 200:
return "200"
case 404:
return "404"
}
return strconv.Itoa(code)
}

20
vendor/golang.org/x/net/http2/not_go111.go generated vendored Normal file

@ -0,0 +1,20 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.11
package http2
import (
"net/http/httptrace"
"net/textproto"
)
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
return nil
}
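This stub pairs with go111.go earlier in the diff: identical signatures behind complementary build tags give callers a single API on every toolchain. The shape of the pattern, reduced to a toy (file, package, and function names are made up):

feature_go111.go:

	// +build go1.11

	package shim

	func featureSupported() bool { return true }

feature_old.go:

	// +build !go1.11

	package shim

	func featureSupported() bool { return false }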

Some files were not shown because too many files have changed in this diff.