Merge pull request #5092 from flouthoc/heredoc-experiment
buildah: add `heredoc` support for `RUN`, `COPY` and `ADD`
This commit is contained in:
commit
07482ae885
4
go.mod
4
go.mod
|
@ -18,6 +18,7 @@ require (
|
||||||
github.com/fsouza/go-dockerclient v1.9.7
|
github.com/fsouza/go-dockerclient v1.9.7
|
||||||
github.com/hashicorp/go-multierror v1.1.1
|
github.com/hashicorp/go-multierror v1.1.1
|
||||||
github.com/mattn/go-shellwords v1.0.12
|
github.com/mattn/go-shellwords v1.0.12
|
||||||
|
github.com/moby/buildkit v0.10.6
|
||||||
github.com/onsi/ginkgo v1.16.5
|
github.com/onsi/ginkgo v1.16.5
|
||||||
github.com/onsi/gomega v1.30.0
|
github.com/onsi/gomega v1.30.0
|
||||||
github.com/opencontainers/go-digest v1.0.0
|
github.com/opencontainers/go-digest v1.0.0
|
||||||
|
@ -26,7 +27,7 @@ require (
|
||||||
github.com/opencontainers/runtime-spec v1.1.0
|
github.com/opencontainers/runtime-spec v1.1.0
|
||||||
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
|
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
|
||||||
github.com/opencontainers/selinux v1.11.0
|
github.com/opencontainers/selinux v1.11.0
|
||||||
github.com/openshift/imagebuilder v1.2.6-0.20231108213319-b27edc077bbc
|
github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722
|
||||||
github.com/seccomp/libseccomp-golang v0.10.0
|
github.com/seccomp/libseccomp-golang v0.10.0
|
||||||
github.com/sirupsen/logrus v1.9.3
|
github.com/sirupsen/logrus v1.9.3
|
||||||
github.com/spf13/cobra v1.8.0
|
github.com/spf13/cobra v1.8.0
|
||||||
|
@ -55,6 +56,7 @@ require (
|
||||||
github.com/containerd/cgroups/v3 v3.0.2 // indirect
|
github.com/containerd/cgroups/v3 v3.0.2 // indirect
|
||||||
github.com/containerd/log v0.1.0 // indirect
|
github.com/containerd/log v0.1.0 // indirect
|
||||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
|
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
|
||||||
|
github.com/containerd/typeurl v1.0.2 // indirect
|
||||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
|
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
|
||||||
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
|
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
|
|
8
go.sum
8
go.sum
|
@ -48,6 +48,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
|
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
|
||||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
|
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
|
||||||
|
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
|
||||||
|
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
|
||||||
github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
|
github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
|
||||||
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
|
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
|
||||||
github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
|
github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
|
||||||
|
@ -284,6 +286,8 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
|
||||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
|
github.com/moby/buildkit v0.10.6 h1:DJlEuLIgnu34HQKF4n9Eg6q2YqQVC0eOpMb4p2eRS2w=
|
||||||
|
github.com/moby/buildkit v0.10.6/go.mod h1:tQuuyTWtOb9D+RE425cwOCUkX0/oZ+5iBZ+uWpWQ9bU=
|
||||||
github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
|
github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
|
||||||
github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||||
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
|
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
|
||||||
|
@ -330,8 +334,8 @@ github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:
|
||||||
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs=
|
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs=
|
||||||
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
||||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||||
github.com/openshift/imagebuilder v1.2.6-0.20231108213319-b27edc077bbc h1:ZQ+qN+nVYlNOOx/Nsm5J78je5r+eJfo62pFGisvHtyI=
|
github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722 h1:vhEmg+NeucmSYnT2j9ukkZLrR/ZOFUuUiGhxlBAlW8U=
|
||||||
github.com/openshift/imagebuilder v1.2.6-0.20231108213319-b27edc077bbc/go.mod h1:hFr3F5mM+J/zFaXcZdNzHS0xKuxAYOZOoHQO9D2JvIU=
|
github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722/go.mod h1:+rSifDZnwJPSW2uYHl7ePSVxq4DEu1VlhNR1uIz/Lm4=
|
||||||
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
|
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
|
||||||
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
|
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
|
||||||
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
|
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
|
||||||
|
|
|
@ -7,6 +7,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
@ -35,6 +36,7 @@ import (
|
||||||
"github.com/containers/storage/pkg/chrootarchive"
|
"github.com/containers/storage/pkg/chrootarchive"
|
||||||
"github.com/containers/storage/pkg/unshare"
|
"github.com/containers/storage/pkg/unshare"
|
||||||
docker "github.com/fsouza/go-dockerclient"
|
docker "github.com/fsouza/go-dockerclient"
|
||||||
|
buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser"
|
||||||
digest "github.com/opencontainers/go-digest"
|
digest "github.com/opencontainers/go-digest"
|
||||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/opencontainers/runtime-spec/specs-go"
|
"github.com/opencontainers/runtime-spec/specs-go"
|
||||||
|
@ -348,6 +350,11 @@ func (s *StageExecutor) volumeCacheRestore() error {
|
||||||
// imagebuilder tells us the instruction was "ADD" and not "COPY".
|
// imagebuilder tells us the instruction was "ADD" and not "COPY".
|
||||||
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
|
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
|
||||||
s.builder.ContentDigester.Restart()
|
s.builder.ContentDigester.Restart()
|
||||||
|
return s.performCopy(excludes, copies...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Copy) error {
|
||||||
|
copiesExtend := []imagebuilder.Copy{}
|
||||||
for _, copy := range copies {
|
for _, copy := range copies {
|
||||||
if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
|
if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -362,7 +369,61 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
||||||
stripSetgid := false
|
stripSetgid := false
|
||||||
preserveOwnership := false
|
preserveOwnership := false
|
||||||
contextDir := s.executor.contextDir
|
contextDir := s.executor.contextDir
|
||||||
if len(copy.From) > 0 {
|
// If we are copying files via heredoc syntax, then
|
||||||
|
// its time to create these temporary files on host
|
||||||
|
// and copy these to container
|
||||||
|
if len(copy.Files) > 0 {
|
||||||
|
// If we are copying files from heredoc syntax, there
|
||||||
|
// maybe regular files from context as well so split and
|
||||||
|
// process them differently
|
||||||
|
if len(copy.Src) > len(copy.Files) {
|
||||||
|
regularSources := []string{}
|
||||||
|
for _, src := range copy.Src {
|
||||||
|
// If this source is not a heredoc, then it is a regular file from
|
||||||
|
// build context or from another stage (`--from=`) so treat this differently.
|
||||||
|
if !strings.HasPrefix(src, "<<") {
|
||||||
|
regularSources = append(regularSources, src)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
copyEntry := copy
|
||||||
|
// Remove heredoc if any, since we are already processing them
|
||||||
|
// so create new entry with sources containing regular files
|
||||||
|
// only, since regular files can have different context then
|
||||||
|
// heredoc files.
|
||||||
|
copyEntry.Files = nil
|
||||||
|
copyEntry.Src = regularSources
|
||||||
|
copiesExtend = append(copiesExtend, copyEntry)
|
||||||
|
}
|
||||||
|
copySources := []string{}
|
||||||
|
for _, file := range copy.Files {
|
||||||
|
data := file.Data
|
||||||
|
// remove first break line added while parsing heredoc
|
||||||
|
data = strings.TrimPrefix(data, "\n")
|
||||||
|
// add breakline when heredoc ends for docker compat
|
||||||
|
data = data + "\n"
|
||||||
|
tmpFile, err := os.Create(filepath.Join(parse.GetTempDir(), path.Base(filepath.ToSlash(file.Name))))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to create tmp file for COPY instruction at %q: %w", parse.GetTempDir(), err)
|
||||||
|
}
|
||||||
|
err = tmpFile.Chmod(0644) // 644 is consistent with buildkit
|
||||||
|
if err != nil {
|
||||||
|
tmpFile.Close()
|
||||||
|
return fmt.Errorf("unable to chmod tmp file created for COPY instruction at %q: %w", tmpFile.Name(), err)
|
||||||
|
}
|
||||||
|
defer os.Remove(tmpFile.Name())
|
||||||
|
_, err = tmpFile.WriteString(data)
|
||||||
|
if err != nil {
|
||||||
|
tmpFile.Close()
|
||||||
|
return fmt.Errorf("unable to write contents of heredoc file at %q: %w", tmpFile.Name(), err)
|
||||||
|
}
|
||||||
|
copySources = append(copySources, filepath.Base(tmpFile.Name()))
|
||||||
|
tmpFile.Close()
|
||||||
|
}
|
||||||
|
contextDir = parse.GetTempDir()
|
||||||
|
copy.Src = copySources
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(copy.From) > 0 && len(copy.Files) == 0 {
|
||||||
// If from has an argument within it, resolve it to its
|
// If from has an argument within it, resolve it to its
|
||||||
// value. Otherwise just return the value found.
|
// value. Otherwise just return the value found.
|
||||||
from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
|
from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
|
||||||
|
@ -486,6 +547,13 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if len(copiesExtend) > 0 {
|
||||||
|
// If we found heredocs and regularfiles together
|
||||||
|
// in same statement then we produced new copies to
|
||||||
|
// process regular files separately since they need
|
||||||
|
// different context.
|
||||||
|
return s.performCopy(excludes, copiesExtend...)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -591,10 +659,59 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
|
||||||
return stageMountPoints, nil
|
return stageMountPoints, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *StageExecutor) createNeededHeredocMountsForRun(files []imagebuilder.File) ([]Mount, error) {
|
||||||
|
mountResult := []Mount{}
|
||||||
|
for _, file := range files {
|
||||||
|
f, err := os.CreateTemp(parse.GetTempDir(), "buildahheredoc")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if _, err := f.WriteString(file.Data); err != nil {
|
||||||
|
f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = f.Chmod(0755)
|
||||||
|
if err != nil {
|
||||||
|
f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// dest path is same as buildkit for compat
|
||||||
|
dest := filepath.Join("/dev/pipes/", filepath.Base(f.Name()))
|
||||||
|
mount := Mount{Destination: dest, Type: define.TypeBind, Source: f.Name(), Options: append(define.BindOptions, "rprivate", "z", "Z")}
|
||||||
|
mountResult = append(mountResult, mount)
|
||||||
|
f.Close()
|
||||||
|
}
|
||||||
|
return mountResult, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Run executes a RUN instruction using the stage's current working container
|
// Run executes a RUN instruction using the stage's current working container
|
||||||
// as a root directory.
|
// as a root directory.
|
||||||
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
|
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
|
||||||
logrus.Debugf("RUN %#v, %#v", run, config)
|
logrus.Debugf("RUN %#v, %#v", run, config)
|
||||||
|
args := run.Args
|
||||||
|
heredocMounts := []Mount{}
|
||||||
|
if len(run.Files) > 0 {
|
||||||
|
if heredoc := buildkitparser.MustParseHeredoc(args[0]); heredoc != nil {
|
||||||
|
if strings.HasPrefix(run.Files[0].Data, "#!") || strings.HasPrefix(run.Files[0].Data, "\n#!") {
|
||||||
|
// This is a single heredoc with a shebang, so create a file
|
||||||
|
// and run it.
|
||||||
|
heredocMount, err := s.createNeededHeredocMountsForRun(run.Files)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
args = []string{heredocMount[0].Destination}
|
||||||
|
heredocMounts = append(heredocMounts, heredocMount...)
|
||||||
|
} else {
|
||||||
|
args = []string{run.Files[0].Data}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
full := args[0]
|
||||||
|
for _, file := range run.Files {
|
||||||
|
full += file.Data + "\n" + file.Name
|
||||||
|
}
|
||||||
|
args = []string{full}
|
||||||
|
}
|
||||||
|
}
|
||||||
stageMountPoints, err := s.runStageMountPoints(run.Mounts)
|
stageMountPoints, err := s.runStageMountPoints(run.Mounts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -658,7 +775,6 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
|
||||||
options.ConfigureNetwork = buildah.NetworkDisabled
|
options.ConfigureNetwork = buildah.NetworkDisabled
|
||||||
}
|
}
|
||||||
|
|
||||||
args := run.Args
|
|
||||||
if run.Shell {
|
if run.Shell {
|
||||||
if len(config.Shell) > 0 && s.builder.Format == define.Dockerv2ImageManifest {
|
if len(config.Shell) > 0 && s.builder.Format == define.Dockerv2ImageManifest {
|
||||||
args = append(config.Shell, args...)
|
args = append(config.Shell, args...)
|
||||||
|
@ -671,6 +787,9 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
options.Mounts = append(options.Mounts, mounts...)
|
options.Mounts = append(options.Mounts, mounts...)
|
||||||
|
if len(heredocMounts) > 0 {
|
||||||
|
options.Mounts = append(options.Mounts, heredocMounts...)
|
||||||
|
}
|
||||||
err = s.builder.Run(args, options)
|
err = s.builder.Run(args, options)
|
||||||
if err2 := s.volumeCacheRestore(); err2 != nil {
|
if err2 := s.volumeCacheRestore(); err2 != nil {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
|
Binary file not shown.
|
@ -267,6 +267,41 @@ _EOF
|
||||||
run_buildah 1 run myctr ls -l subdir/
|
run_buildah 1 run myctr ls -l subdir/
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@test "bud build with heredoc content" {
|
||||||
|
run_buildah build -t heredoc $WITH_POLICY_JSON -f $BUDFILES/heredoc/Containerfile .
|
||||||
|
expect_output --substring "print first line from heredoc"
|
||||||
|
expect_output --substring "print second line from heredoc"
|
||||||
|
expect_output --substring "Heredoc writing first file"
|
||||||
|
expect_output --substring "some text of first file"
|
||||||
|
expect_output --substring "file2 from python"
|
||||||
|
expect_output --substring "(your index page goes here)"
|
||||||
|
expect_output --substring "(robots content)"
|
||||||
|
expect_output --substring "(humans content)"
|
||||||
|
expect_output --substring "this is the output of test6 part1"
|
||||||
|
expect_output --substring "this is the output of test6 part2"
|
||||||
|
expect_output --substring "this is the output of test7 part1"
|
||||||
|
expect_output --substring "this is the output of test7 part2"
|
||||||
|
expect_output --substring "this is the output of test7 part3"
|
||||||
|
expect_output --substring "this is the output of test8 part1"
|
||||||
|
expect_output --substring "this is the output of test8 part2"
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "bud build with heredoc content which is a bash file" {
|
||||||
|
skip_if_in_container
|
||||||
|
_prefetch busybox
|
||||||
|
run_buildah build -t heredoc $WITH_POLICY_JSON -f $BUDFILES/heredoc/Containerfile.bash_file .
|
||||||
|
expect_output --substring "this is the output of test9"
|
||||||
|
expect_output --substring "this is the output of test10"
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "bud build with heredoc verify mount leak" {
|
||||||
|
skip_if_in_container
|
||||||
|
_prefetch alpine
|
||||||
|
run_buildah 1 build -t heredoc $WITH_POLICY_JSON -f $BUDFILES/heredoc/Containerfile.verify_mount_leak .
|
||||||
|
expect_output --substring "this is the output of test"
|
||||||
|
expect_output --substring "ls: /dev/pipes: No such file or directory"
|
||||||
|
}
|
||||||
|
|
||||||
@test "bud with .containerignore" {
|
@test "bud with .containerignore" {
|
||||||
_prefetch alpine busybox
|
_prefetch alpine busybox
|
||||||
run_buildah 125 build -t testbud $WITH_POLICY_JSON -f $BUDFILES/containerignore/Dockerfile $BUDFILES/containerignore
|
run_buildah 125 build -t testbud $WITH_POLICY_JSON -f $BUDFILES/containerignore/Dockerfile $BUDFILES/containerignore
|
||||||
|
|
|
@ -0,0 +1,59 @@
|
||||||
|
FROM docker.io/library/python:latest
|
||||||
|
|
||||||
|
RUN <<EOF
|
||||||
|
echo "print first line from heredoc"
|
||||||
|
echo "print second line from heredoc"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
RUN <<EOF
|
||||||
|
echo "Heredoc writing first file" >> /file1
|
||||||
|
echo "some text of first file" >> /file1
|
||||||
|
EOF
|
||||||
|
|
||||||
|
RUN cat file1
|
||||||
|
|
||||||
|
RUN python3 <<EOF
|
||||||
|
with open("/file2", "w") as f:
|
||||||
|
print("file2 from python", file=f)
|
||||||
|
EOF
|
||||||
|
|
||||||
|
RUN cat file2
|
||||||
|
|
||||||
|
ADD <<EOF /index.html
|
||||||
|
(your index page goes here)
|
||||||
|
EOF
|
||||||
|
|
||||||
|
RUN cat index.html
|
||||||
|
|
||||||
|
COPY <<robots.txt <<humans.txt /test/
|
||||||
|
(robots content)
|
||||||
|
robots.txt
|
||||||
|
(humans content)
|
||||||
|
humans.txt
|
||||||
|
|
||||||
|
RUN cat /proc/self/fd/5 /proc/self/fd/6 5<<FILE1 6<<FILE2 > test6.txt
|
||||||
|
this is the output of test6 part1
|
||||||
|
FILE1
|
||||||
|
this is the output of test6 part2
|
||||||
|
FILE2
|
||||||
|
|
||||||
|
RUN 5<<file cat /proc/self/fd/5 /proc/self/fd/6 6<<FILE | cat /dev/stdin /proc/self/fd/6 6<<File > test7.txt
|
||||||
|
this is the output of test7 part1
|
||||||
|
file
|
||||||
|
this is the output of test7 part2
|
||||||
|
FILE
|
||||||
|
this is the output of test7 part3
|
||||||
|
File
|
||||||
|
|
||||||
|
RUN <<FILE1 cat > test8.1 && <<FILE2 cat > test8.2
|
||||||
|
this is the output of test8 part1
|
||||||
|
FILE1
|
||||||
|
this is the output of test8 part2
|
||||||
|
FILE2
|
||||||
|
|
||||||
|
RUN cat /test/robots.txt
|
||||||
|
RUN cat /test/humans.txt
|
||||||
|
RUN cat test6.txt
|
||||||
|
RUN cat test7.txt
|
||||||
|
RUN cat test8.1
|
||||||
|
RUN cat test8.2
|
|
@ -0,0 +1,15 @@
|
||||||
|
FROM busybox
|
||||||
|
RUN <<EOF
|
||||||
|
#!/bin/sh
|
||||||
|
echo "
|
||||||
|
this is the output of test9" > test9.txt
|
||||||
|
EOF
|
||||||
|
|
||||||
|
RUN <<-EOF
|
||||||
|
#!/bin/sh
|
||||||
|
echo "
|
||||||
|
this is the output of test10" > test10.txt
|
||||||
|
EOF
|
||||||
|
|
||||||
|
RUN cat test9.txt
|
||||||
|
RUN cat test10.txt
|
|
@ -0,0 +1,17 @@
|
||||||
|
FROM alpine
|
||||||
|
|
||||||
|
RUN <<EOF
|
||||||
|
#!/bin/sh
|
||||||
|
echo "
|
||||||
|
this is the output of test" > test.txt
|
||||||
|
# Mount of this file must exists till this run step
|
||||||
|
# so this `ls` command should not fail
|
||||||
|
ls -a /dev/pipes/
|
||||||
|
EOF
|
||||||
|
|
||||||
|
RUN cat test.txt
|
||||||
|
|
||||||
|
# This ls command must fail, since mount is removed in this step
|
||||||
|
RUN ls -a /dev/pipes
|
||||||
|
|
||||||
|
|
|
@ -3016,6 +3016,21 @@ var internalTestCases = []testCase{
|
||||||
dockerUseBuildKit: true,
|
dockerUseBuildKit: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
name: "heredoc-copy",
|
||||||
|
dockerfile: "Dockerfile.heredoc_copy",
|
||||||
|
dockerUseBuildKit: true,
|
||||||
|
contextDir: "heredoc",
|
||||||
|
fsSkip: []string{"(dir):test:mtime",
|
||||||
|
"(dir):test2:mtime",
|
||||||
|
"(dir):test:(dir):humans.txt:mtime",
|
||||||
|
"(dir):test:(dir):robots.txt:mtime",
|
||||||
|
"(dir):test2:(dir):humans.txt:mtime",
|
||||||
|
"(dir):test2:(dir):robots.txt:mtime",
|
||||||
|
"(dir):test2:(dir):image_file:mtime",
|
||||||
|
"(dir):etc:(dir):hostname" /* buildkit does not contains /etc/hostname like buildah */},
|
||||||
|
},
|
||||||
|
|
||||||
{
|
{
|
||||||
name: "replace-symlink-with-directory",
|
name: "replace-symlink-with-directory",
|
||||||
contextDir: "replace/symlink-with-directory",
|
contextDir: "replace/symlink-with-directory",
|
||||||
|
|
|
@ -0,0 +1,23 @@
|
||||||
|
# syntax=docker/dockerfile:1.3-labs
|
||||||
|
FROM busybox as one
|
||||||
|
RUN echo helloworld > image_file
|
||||||
|
FROM busybox
|
||||||
|
RUN echo hello
|
||||||
|
# copy two heredoc and one from context
|
||||||
|
COPY <<robots.txt <<humans.txt file /test/
|
||||||
|
(robots content)
|
||||||
|
Long file with random text
|
||||||
|
Random line
|
||||||
|
HelloWorld
|
||||||
|
robots.txt
|
||||||
|
(humans content)
|
||||||
|
humans.txt
|
||||||
|
# copy two heredoc and one from another stage
|
||||||
|
COPY --from=one image_file <<robots.txt <<humans.txt /test2/
|
||||||
|
(robots content)
|
||||||
|
Long file with random text
|
||||||
|
Random line
|
||||||
|
HelloWorld
|
||||||
|
robots.txt
|
||||||
|
(humans content)
|
||||||
|
humans.txt
|
|
@ -0,0 +1 @@
|
||||||
|
somefile
|
|
@ -0,0 +1,2 @@
|
||||||
|
*.test
|
||||||
|
coverage.txt
|
|
@ -0,0 +1,191 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
https://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright The containerd Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,20 @@
|
||||||
|
# typeurl
|
||||||
|
|
||||||
|
[](https://pkg.go.dev/github.com/containerd/typeurl)
|
||||||
|
[](https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
|
||||||
|
[](https://codecov.io/gh/containerd/typeurl)
|
||||||
|
[](https://goreportcard.com/report/github.com/containerd/typeurl)
|
||||||
|
|
||||||
|
A Go package for managing the registration, marshaling, and unmarshaling of encoded types.
|
||||||
|
|
||||||
|
This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto).
|
||||||
|
|
||||||
|
## Project details
|
||||||
|
|
||||||
|
**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
|
||||||
|
As a containerd sub-project, you will find the:
|
||||||
|
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
|
||||||
|
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
|
||||||
|
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
|
||||||
|
|
||||||
|
information in our [`containerd/project`](https://github.com/containerd/project) repository.
|
|
@ -0,0 +1,83 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package typeurl
|
||||||
|
|
||||||
|
// Package typeurl assists with managing the registration, marshaling, and
|
||||||
|
// unmarshaling of types encoded as protobuf.Any.
|
||||||
|
//
|
||||||
|
// A protobuf.Any is a proto message that can contain any arbitrary data. It
|
||||||
|
// consists of two components, a TypeUrl and a Value, and its proto definition
|
||||||
|
// looks like this:
|
||||||
|
//
|
||||||
|
// message Any {
|
||||||
|
// string type_url = 1;
|
||||||
|
// bytes value = 2;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The TypeUrl is used to distinguish the contents from other proto.Any
|
||||||
|
// messages. This typeurl library manages these URLs to enable automagic
|
||||||
|
// marshaling and unmarshaling of the contents.
|
||||||
|
//
|
||||||
|
// For example, consider this go struct:
|
||||||
|
//
|
||||||
|
// type Foo struct {
|
||||||
|
// Field1 string
|
||||||
|
// Field2 string
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// To use typeurl, types must first be registered. This is typically done in
|
||||||
|
// the init function
|
||||||
|
//
|
||||||
|
// func init() {
|
||||||
|
// typeurl.Register(&Foo{}, "Foo")
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// This will register the type Foo with the url path "Foo". The arguments to
|
||||||
|
// Register are variadic, and are used to construct a url path. Consider this
|
||||||
|
// example, from the github.com/containerd/containerd/client package:
|
||||||
|
//
|
||||||
|
// func init() {
|
||||||
|
// const prefix = "types.containerd.io"
|
||||||
|
// // register TypeUrls for commonly marshaled external types
|
||||||
|
// major := strconv.Itoa(specs.VersionMajor)
|
||||||
|
// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
|
||||||
|
// // this function has more Register calls, which are elided.
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// This registers several types under a more complex url, which ends up mapping
|
||||||
|
// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other
|
||||||
|
// value for major).
|
||||||
|
//
|
||||||
|
// Once a type is registered, it can be marshaled to a proto.Any message simply
|
||||||
|
// by calling `MarshalAny`, like this:
|
||||||
|
//
|
||||||
|
// foo := &Foo{Field1: "value1", Field2: "value2"}
|
||||||
|
// anyFoo, err := typeurl.MarshalAny(foo)
|
||||||
|
//
|
||||||
|
// MarshalAny will resolve the correct URL for the type. If the type in
|
||||||
|
// question implements the proto.Message interface, then it will be marshaled
|
||||||
|
// as a proto message. Otherwise, it will be marshaled as json. This means that
|
||||||
|
// typeurl will work on any arbitrary data, whether or not it has a proto
|
||||||
|
// definition, as long as it can be serialized to json.
|
||||||
|
//
|
||||||
|
// To unmarshal, the process is simply inverse:
|
||||||
|
//
|
||||||
|
// iface, err := typeurl.UnmarshalAny(anyFoo)
|
||||||
|
// foo := iface.(*Foo)
|
||||||
|
//
|
||||||
|
// The correct type is automatically chosen from the type registry, and the
|
||||||
|
// returned interface can be cast straight to that type.
|
|
@ -0,0 +1,214 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package typeurl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"path"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/gogo/protobuf/proto"
|
||||||
|
"github.com/gogo/protobuf/types"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
mu sync.RWMutex
|
||||||
|
registry = make(map[reflect.Type]string)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Definitions of common error types used throughout typeurl.
|
||||||
|
//
|
||||||
|
// These error types are used with errors.Wrap and errors.Wrapf to add context
|
||||||
|
// to an error.
|
||||||
|
//
|
||||||
|
// To detect an error class, use errors.Is() functions to tell whether an
|
||||||
|
// error is of this type.
|
||||||
|
var (
|
||||||
|
ErrNotFound = errors.New("not found")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Register a type with a base URL for JSON marshaling. When the MarshalAny and
|
||||||
|
// UnmarshalAny functions are called they will treat the Any type value as JSON.
|
||||||
|
// To use protocol buffers for handling the Any value the proto.Register
|
||||||
|
// function should be used instead of this function.
|
||||||
|
func Register(v interface{}, args ...string) {
|
||||||
|
var (
|
||||||
|
t = tryDereference(v)
|
||||||
|
p = path.Join(args...)
|
||||||
|
)
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
if et, ok := registry[t]; ok {
|
||||||
|
if et != p {
|
||||||
|
panic(errors.Errorf("type registered with alternate path %q != %q", et, p))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
registry[t] = p
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeURL returns the type url for a registered type.
|
||||||
|
func TypeURL(v interface{}) (string, error) {
|
||||||
|
mu.RLock()
|
||||||
|
u, ok := registry[tryDereference(v)]
|
||||||
|
mu.RUnlock()
|
||||||
|
if !ok {
|
||||||
|
// fallback to the proto registry if it is a proto message
|
||||||
|
pb, ok := v.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v))
|
||||||
|
}
|
||||||
|
return proto.MessageName(pb), nil
|
||||||
|
}
|
||||||
|
return u, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is returns true if the type of the Any is the same as v.
|
||||||
|
func Is(any *types.Any, v interface{}) bool {
|
||||||
|
// call to check that v is a pointer
|
||||||
|
tryDereference(v)
|
||||||
|
url, err := TypeURL(v)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return any.TypeUrl == url
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalAny marshals the value v into an any with the correct TypeUrl.
|
||||||
|
// If the provided object is already a proto.Any message, then it will be
|
||||||
|
// returned verbatim. If it is of type proto.Message, it will be marshaled as a
|
||||||
|
// protocol buffer. Otherwise, the object will be marshaled to json.
|
||||||
|
func MarshalAny(v interface{}) (*types.Any, error) {
|
||||||
|
var marshal func(v interface{}) ([]byte, error)
|
||||||
|
switch t := v.(type) {
|
||||||
|
case *types.Any:
|
||||||
|
// avoid reserializing the type if we have an any.
|
||||||
|
return t, nil
|
||||||
|
case proto.Message:
|
||||||
|
marshal = func(v interface{}) ([]byte, error) {
|
||||||
|
return proto.Marshal(t)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
marshal = json.Marshal
|
||||||
|
}
|
||||||
|
|
||||||
|
url, err := TypeURL(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &types.Any{
|
||||||
|
TypeUrl: url,
|
||||||
|
Value: data,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalAny unmarshals the any type into a concrete type.
|
||||||
|
func UnmarshalAny(any *types.Any) (interface{}, error) {
|
||||||
|
return UnmarshalByTypeURL(any.TypeUrl, any.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type.
|
||||||
|
func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) {
|
||||||
|
return unmarshal(typeURL, value, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalTo unmarshals the any type into a concrete type passed in the out
|
||||||
|
// argument. It is identical to UnmarshalAny, but lets clients provide a
|
||||||
|
// destination type through the out argument.
|
||||||
|
func UnmarshalTo(any *types.Any, out interface{}) error {
|
||||||
|
return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalTo unmarshals the given type and value into a concrete type passed
|
||||||
|
// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients
|
||||||
|
// provide a destination type through the out argument.
|
||||||
|
func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
|
||||||
|
_, err := unmarshal(typeURL, value, out)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
|
||||||
|
t, err := getTypeByUrl(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if v == nil {
|
||||||
|
v = reflect.New(t.t).Interface()
|
||||||
|
} else {
|
||||||
|
// Validate interface type provided by client
|
||||||
|
vURL, err := TypeURL(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if typeURL != vURL {
|
||||||
|
return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.isProto {
|
||||||
|
err = proto.Unmarshal(value, v.(proto.Message))
|
||||||
|
} else {
|
||||||
|
err = json.Unmarshal(value, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type urlType struct {
|
||||||
|
t reflect.Type
|
||||||
|
isProto bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTypeByUrl(url string) (urlType, error) {
|
||||||
|
mu.RLock()
|
||||||
|
for t, u := range registry {
|
||||||
|
if u == url {
|
||||||
|
mu.RUnlock()
|
||||||
|
return urlType{
|
||||||
|
t: t,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mu.RUnlock()
|
||||||
|
// fallback to proto registry
|
||||||
|
t := proto.MessageType(url)
|
||||||
|
if t != nil {
|
||||||
|
return urlType{
|
||||||
|
// get the underlying Elem because proto returns a pointer to the type
|
||||||
|
t: t.Elem(),
|
||||||
|
isProto: true,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url)
|
||||||
|
}
|
||||||
|
|
||||||
|
func tryDereference(v interface{}) reflect.Type {
|
||||||
|
t := reflect.TypeOf(v)
|
||||||
|
if t.Kind() == reflect.Ptr {
|
||||||
|
// require check of pointer but dereference to register
|
||||||
|
return t.Elem()
|
||||||
|
}
|
||||||
|
panic("v is not a pointer to a type")
|
||||||
|
}
|
|
@ -0,0 +1,101 @@
|
||||||
|
// Protocol Buffers for Go with Gadgets
|
||||||
|
//
|
||||||
|
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||||
|
// http://github.com/gogo/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package sortkeys
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Strings(l []string) {
|
||||||
|
sort.Strings(l)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Float64s(l []float64) {
|
||||||
|
sort.Float64s(l)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Float32s(l []float32) {
|
||||||
|
sort.Sort(Float32Slice(l))
|
||||||
|
}
|
||||||
|
|
||||||
|
func Int64s(l []int64) {
|
||||||
|
sort.Sort(Int64Slice(l))
|
||||||
|
}
|
||||||
|
|
||||||
|
func Int32s(l []int32) {
|
||||||
|
sort.Sort(Int32Slice(l))
|
||||||
|
}
|
||||||
|
|
||||||
|
func Uint64s(l []uint64) {
|
||||||
|
sort.Sort(Uint64Slice(l))
|
||||||
|
}
|
||||||
|
|
||||||
|
func Uint32s(l []uint32) {
|
||||||
|
sort.Sort(Uint32Slice(l))
|
||||||
|
}
|
||||||
|
|
||||||
|
func Bools(l []bool) {
|
||||||
|
sort.Sort(BoolSlice(l))
|
||||||
|
}
|
||||||
|
|
||||||
|
type BoolSlice []bool
|
||||||
|
|
||||||
|
func (p BoolSlice) Len() int { return len(p) }
|
||||||
|
func (p BoolSlice) Less(i, j int) bool { return p[j] }
|
||||||
|
func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||||
|
|
||||||
|
type Int64Slice []int64
|
||||||
|
|
||||||
|
func (p Int64Slice) Len() int { return len(p) }
|
||||||
|
func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||||
|
func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||||
|
|
||||||
|
type Int32Slice []int32
|
||||||
|
|
||||||
|
func (p Int32Slice) Len() int { return len(p) }
|
||||||
|
func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||||
|
func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||||
|
|
||||||
|
type Uint64Slice []uint64
|
||||||
|
|
||||||
|
func (p Uint64Slice) Len() int { return len(p) }
|
||||||
|
func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||||
|
func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||||
|
|
||||||
|
type Uint32Slice []uint32
|
||||||
|
|
||||||
|
func (p Uint32Slice) Len() int { return len(p) }
|
||||||
|
func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||||
|
func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||||
|
|
||||||
|
type Float32Slice []float32
|
||||||
|
|
||||||
|
func (p Float32Slice) Len() int { return len(p) }
|
||||||
|
func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||||
|
func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
|
@ -0,0 +1,140 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
// This file implements functions to marshal proto.Message to/from
|
||||||
|
// google.protobuf.Any message.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gogo/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
const googleApis = "type.googleapis.com/"
|
||||||
|
|
||||||
|
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
|
||||||
|
//
|
||||||
|
// Note that regular type assertions should be done using the Is
|
||||||
|
// function. AnyMessageName is provided for less common use cases like filtering a
|
||||||
|
// sequence of Any messages based on a set of allowed message type names.
|
||||||
|
func AnyMessageName(any *Any) (string, error) {
|
||||||
|
if any == nil {
|
||||||
|
return "", fmt.Errorf("message is nil")
|
||||||
|
}
|
||||||
|
slash := strings.LastIndex(any.TypeUrl, "/")
|
||||||
|
if slash < 0 {
|
||||||
|
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||||
|
}
|
||||||
|
return any.TypeUrl[slash+1:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
|
||||||
|
func MarshalAny(pb proto.Message) (*Any, error) {
|
||||||
|
value, err := proto.Marshal(pb)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||||
|
// allocate a proto.Message for the type specified in a google.protobuf.Any
|
||||||
|
// message. The allocated message is stored in the embedded proto.Message.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var x ptypes.DynamicAny
|
||||||
|
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||||
|
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||||
|
type DynamicAny struct {
|
||||||
|
proto.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
// Empty returns a new proto.Message of the type specified in a
|
||||||
|
// google.protobuf.Any message. It returns an error if corresponding message
|
||||||
|
// type isn't linked in.
|
||||||
|
func EmptyAny(any *Any) (proto.Message, error) {
|
||||||
|
aname, err := AnyMessageName(any)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
t := proto.MessageType(aname)
|
||||||
|
if t == nil {
|
||||||
|
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
|
||||||
|
}
|
||||||
|
return reflect.New(t.Elem()).Interface().(proto.Message), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
|
||||||
|
// message and places the decoded result in pb. It returns an error if type of
|
||||||
|
// contents of Any message does not match type of pb message.
|
||||||
|
//
|
||||||
|
// pb can be a proto.Message, or a *DynamicAny.
|
||||||
|
func UnmarshalAny(any *Any, pb proto.Message) error {
|
||||||
|
if d, ok := pb.(*DynamicAny); ok {
|
||||||
|
if d.Message == nil {
|
||||||
|
var err error
|
||||||
|
d.Message, err = EmptyAny(any)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return UnmarshalAny(any, d.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
aname, err := AnyMessageName(any)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
mname := proto.MessageName(pb)
|
||||||
|
if aname != mname {
|
||||||
|
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
|
||||||
|
}
|
||||||
|
return proto.Unmarshal(any.Value, pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is returns true if any value contains a given message type.
|
||||||
|
func Is(any *Any, pb proto.Message) bool {
|
||||||
|
// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
|
||||||
|
// but it avoids scanning TypeUrl for the slash.
|
||||||
|
if any == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
name := proto.MessageName(pb)
|
||||||
|
prefix := len(any.TypeUrl) - len(name)
|
||||||
|
return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
|
||||||
|
}
|
|
@ -0,0 +1,694 @@
|
||||||
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
|
// source: google/protobuf/any.proto
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
bytes "bytes"
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/gogo/protobuf/proto"
|
||||||
|
io "io"
|
||||||
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
reflect "reflect"
|
||||||
|
strings "strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||||
|
// URL that describes the type of the serialized message.
|
||||||
|
//
|
||||||
|
// Protobuf library provides support to pack/unpack Any values in the form
|
||||||
|
// of utility functions or additional generated methods of the Any type.
|
||||||
|
//
|
||||||
|
// Example 1: Pack and unpack a message in C++.
|
||||||
|
//
|
||||||
|
// Foo foo = ...;
|
||||||
|
// Any any;
|
||||||
|
// any.PackFrom(foo);
|
||||||
|
// ...
|
||||||
|
// if (any.UnpackTo(&foo)) {
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 2: Pack and unpack a message in Java.
|
||||||
|
//
|
||||||
|
// Foo foo = ...;
|
||||||
|
// Any any = Any.pack(foo);
|
||||||
|
// ...
|
||||||
|
// if (any.is(Foo.class)) {
|
||||||
|
// foo = any.unpack(Foo.class);
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 3: Pack and unpack a message in Python.
|
||||||
|
//
|
||||||
|
// foo = Foo(...)
|
||||||
|
// any = Any()
|
||||||
|
// any.Pack(foo)
|
||||||
|
// ...
|
||||||
|
// if any.Is(Foo.DESCRIPTOR):
|
||||||
|
// any.Unpack(foo)
|
||||||
|
// ...
|
||||||
|
//
|
||||||
|
// Example 4: Pack and unpack a message in Go
|
||||||
|
//
|
||||||
|
// foo := &pb.Foo{...}
|
||||||
|
// any, err := ptypes.MarshalAny(foo)
|
||||||
|
// ...
|
||||||
|
// foo := &pb.Foo{}
|
||||||
|
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The pack methods provided by protobuf library will by default use
|
||||||
|
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||||
|
// methods only use the fully qualified type name after the last '/'
|
||||||
|
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||||
|
// name "y.z".
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// JSON
|
||||||
|
// ====
|
||||||
|
// The JSON representation of an `Any` value uses the regular
|
||||||
|
// representation of the deserialized, embedded message, with an
|
||||||
|
// additional field `@type` which contains the type URL. Example:
|
||||||
|
//
|
||||||
|
// package google.profile;
|
||||||
|
// message Person {
|
||||||
|
// string first_name = 1;
|
||||||
|
// string last_name = 2;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "@type": "type.googleapis.com/google.profile.Person",
|
||||||
|
// "firstName": <string>,
|
||||||
|
// "lastName": <string>
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// If the embedded message type is well-known and has a custom JSON
|
||||||
|
// representation, that representation will be embedded adding a field
|
||||||
|
// `value` which holds the custom JSON in addition to the `@type`
|
||||||
|
// field. Example (for message [google.protobuf.Duration][]):
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||||
|
// "value": "1.212s"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
type Any struct {
|
||||||
|
// A URL/resource name that uniquely identifies the type of the serialized
|
||||||
|
// protocol buffer message. This string must contain at least
|
||||||
|
// one "/" character. The last segment of the URL's path must represent
|
||||||
|
// the fully qualified name of the type (as in
|
||||||
|
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||||
|
// (e.g., leading "." is not accepted).
|
||||||
|
//
|
||||||
|
// In practice, teams usually precompile into the binary all types that they
|
||||||
|
// expect it to use in the context of Any. However, for URLs which use the
|
||||||
|
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||||
|
// server that maps type URLs to message definitions as follows:
|
||||||
|
//
|
||||||
|
// * If no scheme is provided, `https` is assumed.
|
||||||
|
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||||
|
// value in binary format, or produce an error.
|
||||||
|
// * Applications are allowed to cache lookup results based on the
|
||||||
|
// URL, or have them precompiled into a binary to avoid any
|
||||||
|
// lookup. Therefore, binary compatibility needs to be preserved
|
||||||
|
// on changes to types. (Use versioned type names to manage
|
||||||
|
// breaking changes.)
|
||||||
|
//
|
||||||
|
// Note: this functionality is not currently available in the official
|
||||||
|
// protobuf release, and it is not used for type URLs beginning with
|
||||||
|
// type.googleapis.com.
|
||||||
|
//
|
||||||
|
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||||
|
// used with implementation specific semantics.
|
||||||
|
//
|
||||||
|
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
|
||||||
|
// Must be a valid serialized protocol buffer of the above specified type.
|
||||||
|
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Any) Reset() { *m = Any{} }
|
||||||
|
func (*Any) ProtoMessage() {}
|
||||||
|
func (*Any) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_b53526c13ae22eb4, []int{0}
|
||||||
|
}
|
||||||
|
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||||
|
func (m *Any) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *Any) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Any.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Any) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *Any) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Any.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Any proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Any) GetTypeUrl() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.TypeUrl
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Any) GetValue() []byte {
|
||||||
|
if m != nil {
|
||||||
|
return m.Value
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Any) XXX_MessageName() string {
|
||||||
|
return "google.protobuf.Any"
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
|
||||||
|
|
||||||
|
var fileDescriptor_b53526c13ae22eb4 = []byte{
|
||||||
|
// 211 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
||||||
|
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
||||||
|
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
|
||||||
|
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
|
||||||
|
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
|
||||||
|
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e,
|
||||||
|
0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24,
|
||||||
|
0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78,
|
||||||
|
0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4,
|
||||||
|
0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28,
|
||||||
|
0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94,
|
||||||
|
0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94,
|
||||||
|
0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed,
|
||||||
|
0x00, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Any) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Any)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Any)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if this.TypeUrl != that1.TypeUrl {
|
||||||
|
if this.TypeUrl < that1.TypeUrl {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if c := bytes.Compare(this.Value, that1.Value); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *Any) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
return this == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Any)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Any)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
return this == nil
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.TypeUrl != that1.TypeUrl {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !bytes.Equal(this.Value, that1.Value) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *Any) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 6)
|
||||||
|
s = append(s, "&types.Any{")
|
||||||
|
s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n")
|
||||||
|
s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
|
||||||
|
if this.XXX_unrecognized != nil {
|
||||||
|
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||||
|
}
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringAny(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *Any) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Any) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
i -= len(m.XXX_unrecognized)
|
||||||
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
if len(m.Value) > 0 {
|
||||||
|
i -= len(m.Value)
|
||||||
|
copy(dAtA[i:], m.Value)
|
||||||
|
i = encodeVarintAny(dAtA, i, uint64(len(m.Value)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
}
|
||||||
|
if len(m.TypeUrl) > 0 {
|
||||||
|
i -= len(m.TypeUrl)
|
||||||
|
copy(dAtA[i:], m.TypeUrl)
|
||||||
|
i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeVarintAny(dAtA []byte, offset int, v uint64) int {
|
||||||
|
offset -= sovAny(v)
|
||||||
|
base := offset
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
func NewPopulatedAny(r randyAny, easy bool) *Any {
|
||||||
|
this := &Any{}
|
||||||
|
this.TypeUrl = string(randStringAny(r))
|
||||||
|
v1 := r.Intn(100)
|
||||||
|
this.Value = make([]byte, v1)
|
||||||
|
for i := 0; i < v1; i++ {
|
||||||
|
this.Value[i] = byte(r.Intn(256))
|
||||||
|
}
|
||||||
|
if !easy && r.Intn(10) != 0 {
|
||||||
|
this.XXX_unrecognized = randUnrecognizedAny(r, 3)
|
||||||
|
}
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
type randyAny interface {
|
||||||
|
Float32() float32
|
||||||
|
Float64() float64
|
||||||
|
Int63() int64
|
||||||
|
Int31() int32
|
||||||
|
Uint32() uint32
|
||||||
|
Intn(n int) int
|
||||||
|
}
|
||||||
|
|
||||||
|
func randUTF8RuneAny(r randyAny) rune {
|
||||||
|
ru := r.Intn(62)
|
||||||
|
if ru < 10 {
|
||||||
|
return rune(ru + 48)
|
||||||
|
} else if ru < 36 {
|
||||||
|
return rune(ru + 55)
|
||||||
|
}
|
||||||
|
return rune(ru + 61)
|
||||||
|
}
|
||||||
|
func randStringAny(r randyAny) string {
|
||||||
|
v2 := r.Intn(100)
|
||||||
|
tmps := make([]rune, v2)
|
||||||
|
for i := 0; i < v2; i++ {
|
||||||
|
tmps[i] = randUTF8RuneAny(r)
|
||||||
|
}
|
||||||
|
return string(tmps)
|
||||||
|
}
|
||||||
|
func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) {
|
||||||
|
l := r.Intn(5)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
wire := r.Intn(4)
|
||||||
|
if wire == 3 {
|
||||||
|
wire = 5
|
||||||
|
}
|
||||||
|
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||||
|
dAtA = randFieldAny(dAtA, r, fieldNumber, wire)
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte {
|
||||||
|
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||||
|
switch wire {
|
||||||
|
case 0:
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||||
|
v3 := r.Int63()
|
||||||
|
if r.Intn(2) == 0 {
|
||||||
|
v3 *= -1
|
||||||
|
}
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(v3))
|
||||||
|
case 1:
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
case 2:
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||||
|
ll := r.Intn(100)
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(ll))
|
||||||
|
for j := 0; j < ll; j++ {
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||||
|
v >>= 7
|
||||||
|
}
|
||||||
|
dAtA = append(dAtA, uint8(v))
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func (m *Any) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.TypeUrl)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAny(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Value)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAny(uint64(l))
|
||||||
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovAny(x uint64) (n int) {
|
||||||
|
return (math_bits.Len64(x|1) + 6) / 7
|
||||||
|
}
|
||||||
|
func sozAny(x uint64) (n int) {
|
||||||
|
return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *Any) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&Any{`,
|
||||||
|
`TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`,
|
||||||
|
`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
|
||||||
|
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringAny(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *Any) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Any: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.TypeUrl = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= int(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.Value == nil {
|
||||||
|
m.Value = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipAny(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipAny(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
depth := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
case 3:
|
||||||
|
depth++
|
||||||
|
case 4:
|
||||||
|
if depth == 0 {
|
||||||
|
return 0, ErrUnexpectedEndOfGroupAny
|
||||||
|
}
|
||||||
|
depth--
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
if depth == 0 {
|
||||||
|
return iNdEx, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowAny = fmt.Errorf("proto: integer overflow")
|
||||||
|
ErrUnexpectedEndOfGroupAny = fmt.Errorf("proto: unexpected end of group")
|
||||||
|
)
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,35 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package types contains code for interacting with well-known types.
|
||||||
|
*/
|
||||||
|
package types
|
|
@ -0,0 +1,100 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
// This file implements conversions between google.protobuf.Duration
|
||||||
|
// and time.Duration.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Range of a Duration in seconds, as specified in
|
||||||
|
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
||||||
|
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||||
|
minSeconds = -maxSeconds
|
||||||
|
)
|
||||||
|
|
||||||
|
// validateDuration determines whether the Duration is valid according to the
|
||||||
|
// definition in google/protobuf/duration.proto. A valid Duration
|
||||||
|
// may still be too large to fit into a time.Duration (the range of Duration
|
||||||
|
// is about 10,000 years, and the range of time.Duration is about 290).
|
||||||
|
func validateDuration(d *Duration) error {
|
||||||
|
if d == nil {
|
||||||
|
return errors.New("duration: nil Duration")
|
||||||
|
}
|
||||||
|
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
||||||
|
return fmt.Errorf("duration: %#v: seconds out of range", d)
|
||||||
|
}
|
||||||
|
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
||||||
|
return fmt.Errorf("duration: %#v: nanos out of range", d)
|
||||||
|
}
|
||||||
|
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||||
|
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
||||||
|
return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
|
||||||
|
// returns an error if the Duration is invalid or is too large to be
|
||||||
|
// represented in a time.Duration.
|
||||||
|
func DurationFromProto(p *Duration) (time.Duration, error) {
|
||||||
|
if err := validateDuration(p); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
d := time.Duration(p.Seconds) * time.Second
|
||||||
|
if int64(d/time.Second) != p.Seconds {
|
||||||
|
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
||||||
|
}
|
||||||
|
if p.Nanos != 0 {
|
||||||
|
d += time.Duration(p.Nanos) * time.Nanosecond
|
||||||
|
if (d < 0) != (p.Nanos < 0) {
|
||||||
|
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DurationProto converts a time.Duration to a Duration.
|
||||||
|
func DurationProto(d time.Duration) *Duration {
|
||||||
|
nanos := d.Nanoseconds()
|
||||||
|
secs := nanos / 1e9
|
||||||
|
nanos -= secs * 1e9
|
||||||
|
return &Duration{
|
||||||
|
Seconds: secs,
|
||||||
|
Nanos: int32(nanos),
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,517 @@
|
||||||
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
|
// source: google/protobuf/duration.proto
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
bytes "bytes"
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/gogo/protobuf/proto"
|
||||||
|
io "io"
|
||||||
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
reflect "reflect"
|
||||||
|
strings "strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
// A Duration represents a signed, fixed-length span of time represented
|
||||||
|
// as a count of seconds and fractions of seconds at nanosecond
|
||||||
|
// resolution. It is independent of any calendar and concepts like "day"
|
||||||
|
// or "month". It is related to Timestamp in that the difference between
|
||||||
|
// two Timestamp values is a Duration and it can be added or subtracted
|
||||||
|
// from a Timestamp. Range is approximately +-10,000 years.
|
||||||
|
//
|
||||||
|
// # Examples
|
||||||
|
//
|
||||||
|
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||||
|
//
|
||||||
|
// Timestamp start = ...;
|
||||||
|
// Timestamp end = ...;
|
||||||
|
// Duration duration = ...;
|
||||||
|
//
|
||||||
|
// duration.seconds = end.seconds - start.seconds;
|
||||||
|
// duration.nanos = end.nanos - start.nanos;
|
||||||
|
//
|
||||||
|
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||||
|
// duration.seconds += 1;
|
||||||
|
// duration.nanos -= 1000000000;
|
||||||
|
// } else if (durations.seconds > 0 && duration.nanos < 0) {
|
||||||
|
// duration.seconds -= 1;
|
||||||
|
// duration.nanos += 1000000000;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||||
|
//
|
||||||
|
// Timestamp start = ...;
|
||||||
|
// Duration duration = ...;
|
||||||
|
// Timestamp end = ...;
|
||||||
|
//
|
||||||
|
// end.seconds = start.seconds + duration.seconds;
|
||||||
|
// end.nanos = start.nanos + duration.nanos;
|
||||||
|
//
|
||||||
|
// if (end.nanos < 0) {
|
||||||
|
// end.seconds -= 1;
|
||||||
|
// end.nanos += 1000000000;
|
||||||
|
// } else if (end.nanos >= 1000000000) {
|
||||||
|
// end.seconds += 1;
|
||||||
|
// end.nanos -= 1000000000;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 3: Compute Duration from datetime.timedelta in Python.
|
||||||
|
//
|
||||||
|
// td = datetime.timedelta(days=3, minutes=10)
|
||||||
|
// duration = Duration()
|
||||||
|
// duration.FromTimedelta(td)
|
||||||
|
//
|
||||||
|
// # JSON Mapping
|
||||||
|
//
|
||||||
|
// In JSON format, the Duration type is encoded as a string rather than an
|
||||||
|
// object, where the string ends in the suffix "s" (indicating seconds) and
|
||||||
|
// is preceded by the number of seconds, with nanoseconds expressed as
|
||||||
|
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
||||||
|
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
||||||
|
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
||||||
|
// microsecond should be expressed in JSON format as "3.000001s".
|
||||||
|
//
|
||||||
|
//
|
||||||
|
type Duration struct {
|
||||||
|
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||||
|
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||||
|
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||||
|
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||||
|
// Signed fractions of a second at nanosecond resolution of the span
|
||||||
|
// of time. Durations less than one second are represented with a 0
|
||||||
|
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||||
|
// of one second or more, a non-zero value for the `nanos` field must be
|
||||||
|
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||||
|
// to +999,999,999 inclusive.
|
||||||
|
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Duration) Reset() { *m = Duration{} }
|
||||||
|
func (*Duration) ProtoMessage() {}
|
||||||
|
func (*Duration) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
|
||||||
|
}
|
||||||
|
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||||
|
func (m *Duration) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *Duration) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Duration.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Duration) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *Duration) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Duration.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Duration) GetSeconds() int64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Seconds
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Duration) GetNanos() int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Nanos
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Duration) XXX_MessageName() string {
|
||||||
|
return "google.protobuf.Duration"
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
|
||||||
|
|
||||||
|
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
|
||||||
|
// 209 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
||||||
|
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
|
||||||
|
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
|
||||||
|
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
|
||||||
|
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
|
||||||
|
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c,
|
||||||
|
0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91,
|
||||||
|
0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2,
|
||||||
|
0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b,
|
||||||
|
0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff,
|
||||||
|
0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00,
|
||||||
|
0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89,
|
||||||
|
0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00,
|
||||||
|
0x00,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Duration) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Duration)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Duration)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if this.Seconds != that1.Seconds {
|
||||||
|
if this.Seconds < that1.Seconds {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if this.Nanos != that1.Nanos {
|
||||||
|
if this.Nanos < that1.Nanos {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *Duration) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
return this == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Duration)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Duration)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
return this == nil
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Seconds != that1.Seconds {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Nanos != that1.Nanos {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *Duration) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 6)
|
||||||
|
s = append(s, "&types.Duration{")
|
||||||
|
s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
|
||||||
|
s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
|
||||||
|
if this.XXX_unrecognized != nil {
|
||||||
|
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||||
|
}
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringDuration(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *Duration) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Duration) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
i -= len(m.XXX_unrecognized)
|
||||||
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
if m.Nanos != 0 {
|
||||||
|
i = encodeVarintDuration(dAtA, i, uint64(m.Nanos))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
}
|
||||||
|
if m.Seconds != 0 {
|
||||||
|
i = encodeVarintDuration(dAtA, i, uint64(m.Seconds))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeVarintDuration(dAtA []byte, offset int, v uint64) int {
|
||||||
|
offset -= sovDuration(v)
|
||||||
|
base := offset
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
func (m *Duration) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Seconds != 0 {
|
||||||
|
n += 1 + sovDuration(uint64(m.Seconds))
|
||||||
|
}
|
||||||
|
if m.Nanos != 0 {
|
||||||
|
n += 1 + sovDuration(uint64(m.Nanos))
|
||||||
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovDuration(x uint64) (n int) {
|
||||||
|
return (math_bits.Len64(x|1) + 6) / 7
|
||||||
|
}
|
||||||
|
func sozDuration(x uint64) (n int) {
|
||||||
|
return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (m *Duration) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Duration: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
|
||||||
|
}
|
||||||
|
m.Seconds = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Seconds |= int64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
|
||||||
|
}
|
||||||
|
m.Nanos = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Nanos |= int32(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipDuration(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthDuration
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipDuration(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
depth := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthDuration
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
case 3:
|
||||||
|
depth++
|
||||||
|
case 4:
|
||||||
|
if depth == 0 {
|
||||||
|
return 0, ErrUnexpectedEndOfGroupDuration
|
||||||
|
}
|
||||||
|
depth--
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthDuration
|
||||||
|
}
|
||||||
|
if depth == 0 {
|
||||||
|
return iNdEx, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow")
|
||||||
|
ErrUnexpectedEndOfGroupDuration = fmt.Errorf("proto: unexpected end of group")
|
||||||
|
)
|
|
@ -0,0 +1,100 @@
|
||||||
|
// Protocol Buffers for Go with Gadgets
|
||||||
|
//
|
||||||
|
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||||
|
// http://github.com/gogo/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewPopulatedDuration(r interface {
|
||||||
|
Int63() int64
|
||||||
|
}, easy bool) *Duration {
|
||||||
|
this := &Duration{}
|
||||||
|
maxSecs := time.Hour.Nanoseconds() / 1e9
|
||||||
|
max := 2 * maxSecs
|
||||||
|
s := int64(r.Int63()) % max
|
||||||
|
s -= maxSecs
|
||||||
|
neg := int64(1)
|
||||||
|
if s < 0 {
|
||||||
|
neg = -1
|
||||||
|
}
|
||||||
|
this.Seconds = s
|
||||||
|
this.Nanos = int32(neg * (r.Int63() % 1e9))
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Duration) String() string {
|
||||||
|
td, err := DurationFromProto(d)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("(%v)", err)
|
||||||
|
}
|
||||||
|
return td.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPopulatedStdDuration(r interface {
|
||||||
|
Int63() int64
|
||||||
|
}, easy bool) *time.Duration {
|
||||||
|
dur := NewPopulatedDuration(r, easy)
|
||||||
|
d, err := DurationFromProto(dur)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &d
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdDuration(d time.Duration) int {
|
||||||
|
dur := DurationProto(d)
|
||||||
|
return dur.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdDurationMarshal(d time.Duration) ([]byte, error) {
|
||||||
|
size := SizeOfStdDuration(d)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdDurationMarshalTo(d, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) {
|
||||||
|
dur := DurationProto(d)
|
||||||
|
return dur.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdDurationUnmarshal(d *time.Duration, data []byte) error {
|
||||||
|
dur := &Duration{}
|
||||||
|
if err := dur.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dd, err := DurationFromProto(dur)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*d = dd
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,462 @@
|
||||||
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
|
// source: google/protobuf/empty.proto
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
bytes "bytes"
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/gogo/protobuf/proto"
|
||||||
|
io "io"
|
||||||
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
reflect "reflect"
|
||||||
|
strings "strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
// A generic empty message that you can re-use to avoid defining duplicated
|
||||||
|
// empty messages in your APIs. A typical example is to use it as the request
|
||||||
|
// or the response type of an API method. For instance:
|
||||||
|
//
|
||||||
|
// service Foo {
|
||||||
|
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The JSON representation for `Empty` is empty JSON object `{}`.
|
||||||
|
type Empty struct {
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Empty) Reset() { *m = Empty{} }
|
||||||
|
func (*Empty) ProtoMessage() {}
|
||||||
|
func (*Empty) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_900544acb223d5b8, []int{0}
|
||||||
|
}
|
||||||
|
func (*Empty) XXX_WellKnownType() string { return "Empty" }
|
||||||
|
func (m *Empty) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *Empty) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Empty.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Empty) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *Empty) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Empty.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Empty proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (*Empty) XXX_MessageName() string {
|
||||||
|
return "google.protobuf.Empty"
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
|
||||||
|
|
||||||
|
var fileDescriptor_900544acb223d5b8 = []byte{
|
||||||
|
// 176 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
|
||||||
|
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
|
||||||
|
0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
|
||||||
|
0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28,
|
||||||
|
0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c,
|
||||||
|
0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72,
|
||||||
|
0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05,
|
||||||
|
0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8,
|
||||||
|
0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd,
|
||||||
|
0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8,
|
||||||
|
0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Empty) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Empty)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Empty)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *Empty) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
return this == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Empty)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Empty)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
return this == nil
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *Empty) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 4)
|
||||||
|
s = append(s, "&types.Empty{")
|
||||||
|
if this.XXX_unrecognized != nil {
|
||||||
|
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||||
|
}
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringEmpty(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *Empty) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Empty) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
i -= len(m.XXX_unrecognized)
|
||||||
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int {
|
||||||
|
offset -= sovEmpty(v)
|
||||||
|
base := offset
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty {
|
||||||
|
this := &Empty{}
|
||||||
|
if !easy && r.Intn(10) != 0 {
|
||||||
|
this.XXX_unrecognized = randUnrecognizedEmpty(r, 1)
|
||||||
|
}
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
type randyEmpty interface {
|
||||||
|
Float32() float32
|
||||||
|
Float64() float64
|
||||||
|
Int63() int64
|
||||||
|
Int31() int32
|
||||||
|
Uint32() uint32
|
||||||
|
Intn(n int) int
|
||||||
|
}
|
||||||
|
|
||||||
|
func randUTF8RuneEmpty(r randyEmpty) rune {
|
||||||
|
ru := r.Intn(62)
|
||||||
|
if ru < 10 {
|
||||||
|
return rune(ru + 48)
|
||||||
|
} else if ru < 36 {
|
||||||
|
return rune(ru + 55)
|
||||||
|
}
|
||||||
|
return rune(ru + 61)
|
||||||
|
}
|
||||||
|
func randStringEmpty(r randyEmpty) string {
|
||||||
|
v1 := r.Intn(100)
|
||||||
|
tmps := make([]rune, v1)
|
||||||
|
for i := 0; i < v1; i++ {
|
||||||
|
tmps[i] = randUTF8RuneEmpty(r)
|
||||||
|
}
|
||||||
|
return string(tmps)
|
||||||
|
}
|
||||||
|
func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) {
|
||||||
|
l := r.Intn(5)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
wire := r.Intn(4)
|
||||||
|
if wire == 3 {
|
||||||
|
wire = 5
|
||||||
|
}
|
||||||
|
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||||
|
dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire)
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte {
|
||||||
|
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||||
|
switch wire {
|
||||||
|
case 0:
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||||
|
v2 := r.Int63()
|
||||||
|
if r.Intn(2) == 0 {
|
||||||
|
v2 *= -1
|
||||||
|
}
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2))
|
||||||
|
case 1:
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
case 2:
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||||
|
ll := r.Intn(100)
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll))
|
||||||
|
for j := 0; j < ll; j++ {
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||||
|
v >>= 7
|
||||||
|
}
|
||||||
|
dAtA = append(dAtA, uint8(v))
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func (m *Empty) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovEmpty(x uint64) (n int) {
|
||||||
|
return (math_bits.Len64(x|1) + 6) / 7
|
||||||
|
}
|
||||||
|
func sozEmpty(x uint64) (n int) {
|
||||||
|
return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *Empty) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&Empty{`,
|
||||||
|
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringEmpty(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *Empty) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEmpty
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Empty: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipEmpty(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthEmpty
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipEmpty(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
depth := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEmpty
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEmpty
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEmpty
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthEmpty
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
case 3:
|
||||||
|
depth++
|
||||||
|
case 4:
|
||||||
|
if depth == 0 {
|
||||||
|
return 0, ErrUnexpectedEndOfGroupEmpty
|
||||||
|
}
|
||||||
|
depth--
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthEmpty
|
||||||
|
}
|
||||||
|
if depth == 0 {
|
||||||
|
return iNdEx, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow")
|
||||||
|
ErrUnexpectedEndOfGroupEmpty = fmt.Errorf("proto: unexpected end of group")
|
||||||
|
)
|
|
@ -0,0 +1,738 @@
|
||||||
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
|
// source: google/protobuf/field_mask.proto
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
bytes "bytes"
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/gogo/protobuf/proto"
|
||||||
|
io "io"
|
||||||
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
reflect "reflect"
|
||||||
|
strings "strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
// `FieldMask` represents a set of symbolic field paths, for example:
|
||||||
|
//
|
||||||
|
// paths: "f.a"
|
||||||
|
// paths: "f.b.d"
|
||||||
|
//
|
||||||
|
// Here `f` represents a field in some root message, `a` and `b`
|
||||||
|
// fields in the message found in `f`, and `d` a field found in the
|
||||||
|
// message in `f.b`.
|
||||||
|
//
|
||||||
|
// Field masks are used to specify a subset of fields that should be
|
||||||
|
// returned by a get operation or modified by an update operation.
|
||||||
|
// Field masks also have a custom JSON encoding (see below).
|
||||||
|
//
|
||||||
|
// # Field Masks in Projections
|
||||||
|
//
|
||||||
|
// When used in the context of a projection, a response message or
|
||||||
|
// sub-message is filtered by the API to only contain those fields as
|
||||||
|
// specified in the mask. For example, if the mask in the previous
|
||||||
|
// example is applied to a response message as follows:
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// a : 22
|
||||||
|
// b {
|
||||||
|
// d : 1
|
||||||
|
// x : 2
|
||||||
|
// }
|
||||||
|
// y : 13
|
||||||
|
// }
|
||||||
|
// z: 8
|
||||||
|
//
|
||||||
|
// The result will not contain specific values for fields x,y and z
|
||||||
|
// (their value will be set to the default, and omitted in proto text
|
||||||
|
// output):
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// a : 22
|
||||||
|
// b {
|
||||||
|
// d : 1
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// A repeated field is not allowed except at the last position of a
|
||||||
|
// paths string.
|
||||||
|
//
|
||||||
|
// If a FieldMask object is not present in a get operation, the
|
||||||
|
// operation applies to all fields (as if a FieldMask of all fields
|
||||||
|
// had been specified).
|
||||||
|
//
|
||||||
|
// Note that a field mask does not necessarily apply to the
|
||||||
|
// top-level response message. In case of a REST get operation, the
|
||||||
|
// field mask applies directly to the response, but in case of a REST
|
||||||
|
// list operation, the mask instead applies to each individual message
|
||||||
|
// in the returned resource list. In case of a REST custom method,
|
||||||
|
// other definitions may be used. Where the mask applies will be
|
||||||
|
// clearly documented together with its declaration in the API. In
|
||||||
|
// any case, the effect on the returned resource/resources is required
|
||||||
|
// behavior for APIs.
|
||||||
|
//
|
||||||
|
// # Field Masks in Update Operations
|
||||||
|
//
|
||||||
|
// A field mask in update operations specifies which fields of the
|
||||||
|
// targeted resource are going to be updated. The API is required
|
||||||
|
// to only change the values of the fields as specified in the mask
|
||||||
|
// and leave the others untouched. If a resource is passed in to
|
||||||
|
// describe the updated values, the API ignores the values of all
|
||||||
|
// fields not covered by the mask.
|
||||||
|
//
|
||||||
|
// If a repeated field is specified for an update operation, new values will
|
||||||
|
// be appended to the existing repeated field in the target resource. Note that
|
||||||
|
// a repeated field is only allowed in the last position of a `paths` string.
|
||||||
|
//
|
||||||
|
// If a sub-message is specified in the last position of the field mask for an
|
||||||
|
// update operation, then new value will be merged into the existing sub-message
|
||||||
|
// in the target resource.
|
||||||
|
//
|
||||||
|
// For example, given the target message:
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// b {
|
||||||
|
// d: 1
|
||||||
|
// x: 2
|
||||||
|
// }
|
||||||
|
// c: [1]
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// And an update message:
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// b {
|
||||||
|
// d: 10
|
||||||
|
// }
|
||||||
|
// c: [2]
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// then if the field mask is:
|
||||||
|
//
|
||||||
|
// paths: ["f.b", "f.c"]
|
||||||
|
//
|
||||||
|
// then the result will be:
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// b {
|
||||||
|
// d: 10
|
||||||
|
// x: 2
|
||||||
|
// }
|
||||||
|
// c: [1, 2]
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// An implementation may provide options to override this default behavior for
|
||||||
|
// repeated and message fields.
|
||||||
|
//
|
||||||
|
// In order to reset a field's value to the default, the field must
|
||||||
|
// be in the mask and set to the default value in the provided resource.
|
||||||
|
// Hence, in order to reset all fields of a resource, provide a default
|
||||||
|
// instance of the resource and set all fields in the mask, or do
|
||||||
|
// not provide a mask as described below.
|
||||||
|
//
|
||||||
|
// If a field mask is not present on update, the operation applies to
|
||||||
|
// all fields (as if a field mask of all fields has been specified).
|
||||||
|
// Note that in the presence of schema evolution, this may mean that
|
||||||
|
// fields the client does not know and has therefore not filled into
|
||||||
|
// the request will be reset to their default. If this is unwanted
|
||||||
|
// behavior, a specific service may require a client to always specify
|
||||||
|
// a field mask, producing an error if not.
|
||||||
|
//
|
||||||
|
// As with get operations, the location of the resource which
|
||||||
|
// describes the updated values in the request message depends on the
|
||||||
|
// operation kind. In any case, the effect of the field mask is
|
||||||
|
// required to be honored by the API.
|
||||||
|
//
|
||||||
|
// ## Considerations for HTTP REST
|
||||||
|
//
|
||||||
|
// The HTTP kind of an update operation which uses a field mask must
|
||||||
|
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
|
||||||
|
// (PUT must only be used for full updates).
|
||||||
|
//
|
||||||
|
// # JSON Encoding of Field Masks
|
||||||
|
//
|
||||||
|
// In JSON, a field mask is encoded as a single string where paths are
|
||||||
|
// separated by a comma. Fields name in each path are converted
|
||||||
|
// to/from lower-camel naming conventions.
|
||||||
|
//
|
||||||
|
// As an example, consider the following message declarations:
|
||||||
|
//
|
||||||
|
// message Profile {
|
||||||
|
// User user = 1;
|
||||||
|
// Photo photo = 2;
|
||||||
|
// }
|
||||||
|
// message User {
|
||||||
|
// string display_name = 1;
|
||||||
|
// string address = 2;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// In proto a field mask for `Profile` may look as such:
|
||||||
|
//
|
||||||
|
// mask {
|
||||||
|
// paths: "user.display_name"
|
||||||
|
// paths: "photo"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// In JSON, the same mask is represented as below:
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// mask: "user.displayName,photo"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// # Field Masks and Oneof Fields
|
||||||
|
//
|
||||||
|
// Field masks treat fields in oneofs just as regular fields. Consider the
|
||||||
|
// following message:
|
||||||
|
//
|
||||||
|
// message SampleMessage {
|
||||||
|
// oneof test_oneof {
|
||||||
|
// string name = 4;
|
||||||
|
// SubMessage sub_message = 9;
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The field mask can be:
|
||||||
|
//
|
||||||
|
// mask {
|
||||||
|
// paths: "name"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Or:
|
||||||
|
//
|
||||||
|
// mask {
|
||||||
|
// paths: "sub_message"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Note that oneof type names ("test_oneof" in this case) cannot be used in
|
||||||
|
// paths.
|
||||||
|
//
|
||||||
|
// ## Field Mask Verification
|
||||||
|
//
|
||||||
|
// The implementation of any API method which has a FieldMask type field in the
|
||||||
|
// request should verify the included field paths, and return an
|
||||||
|
// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
|
||||||
|
type FieldMask struct {
|
||||||
|
// The set of field mask paths.
|
||||||
|
Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FieldMask) Reset() { *m = FieldMask{} }
|
||||||
|
func (*FieldMask) ProtoMessage() {}
|
||||||
|
func (*FieldMask) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_5158202634f0da48, []int{0}
|
||||||
|
}
|
||||||
|
func (m *FieldMask) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *FieldMask) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_FieldMask.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *FieldMask) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *FieldMask) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_FieldMask.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_FieldMask proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *FieldMask) GetPaths() []string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Paths
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*FieldMask) XXX_MessageName() string {
|
||||||
|
return "google.protobuf.FieldMask"
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) }
|
||||||
|
|
||||||
|
var fileDescriptor_5158202634f0da48 = []byte{
|
||||||
|
// 203 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f,
|
||||||
|
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd,
|
||||||
|
0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54,
|
||||||
|
0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16,
|
||||||
|
0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c,
|
||||||
|
0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39,
|
||||||
|
0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23,
|
||||||
|
0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7,
|
||||||
|
0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50,
|
||||||
|
0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee,
|
||||||
|
0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72,
|
||||||
|
0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00,
|
||||||
|
0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *FieldMask) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*FieldMask)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(FieldMask)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if len(this.Paths) != len(that1.Paths) {
|
||||||
|
if len(this.Paths) < len(that1.Paths) {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
for i := range this.Paths {
|
||||||
|
if this.Paths[i] != that1.Paths[i] {
|
||||||
|
if this.Paths[i] < that1.Paths[i] {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *FieldMask) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
return this == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*FieldMask)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(FieldMask)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
return this == nil
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(this.Paths) != len(that1.Paths) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i := range this.Paths {
|
||||||
|
if this.Paths[i] != that1.Paths[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *FieldMask) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 5)
|
||||||
|
s = append(s, "&types.FieldMask{")
|
||||||
|
s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n")
|
||||||
|
if this.XXX_unrecognized != nil {
|
||||||
|
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||||
|
}
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringFieldMask(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *FieldMask) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
i -= len(m.XXX_unrecognized)
|
||||||
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
if len(m.Paths) > 0 {
|
||||||
|
for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
|
||||||
|
i -= len(m.Paths[iNdEx])
|
||||||
|
copy(dAtA[i:], m.Paths[iNdEx])
|
||||||
|
i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx])))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int {
|
||||||
|
offset -= sovFieldMask(v)
|
||||||
|
base := offset
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask {
|
||||||
|
this := &FieldMask{}
|
||||||
|
v1 := r.Intn(10)
|
||||||
|
this.Paths = make([]string, v1)
|
||||||
|
for i := 0; i < v1; i++ {
|
||||||
|
this.Paths[i] = string(randStringFieldMask(r))
|
||||||
|
}
|
||||||
|
if !easy && r.Intn(10) != 0 {
|
||||||
|
this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2)
|
||||||
|
}
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
type randyFieldMask interface {
|
||||||
|
Float32() float32
|
||||||
|
Float64() float64
|
||||||
|
Int63() int64
|
||||||
|
Int31() int32
|
||||||
|
Uint32() uint32
|
||||||
|
Intn(n int) int
|
||||||
|
}
|
||||||
|
|
||||||
|
func randUTF8RuneFieldMask(r randyFieldMask) rune {
|
||||||
|
ru := r.Intn(62)
|
||||||
|
if ru < 10 {
|
||||||
|
return rune(ru + 48)
|
||||||
|
} else if ru < 36 {
|
||||||
|
return rune(ru + 55)
|
||||||
|
}
|
||||||
|
return rune(ru + 61)
|
||||||
|
}
|
||||||
|
func randStringFieldMask(r randyFieldMask) string {
|
||||||
|
v2 := r.Intn(100)
|
||||||
|
tmps := make([]rune, v2)
|
||||||
|
for i := 0; i < v2; i++ {
|
||||||
|
tmps[i] = randUTF8RuneFieldMask(r)
|
||||||
|
}
|
||||||
|
return string(tmps)
|
||||||
|
}
|
||||||
|
func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) {
|
||||||
|
l := r.Intn(5)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
wire := r.Intn(4)
|
||||||
|
if wire == 3 {
|
||||||
|
wire = 5
|
||||||
|
}
|
||||||
|
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||||
|
dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire)
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte {
|
||||||
|
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||||
|
switch wire {
|
||||||
|
case 0:
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||||
|
v3 := r.Int63()
|
||||||
|
if r.Intn(2) == 0 {
|
||||||
|
v3 *= -1
|
||||||
|
}
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3))
|
||||||
|
case 1:
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
case 2:
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||||
|
ll := r.Intn(100)
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll))
|
||||||
|
for j := 0; j < ll; j++ {
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||||
|
v >>= 7
|
||||||
|
}
|
||||||
|
dAtA = append(dAtA, uint8(v))
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func (m *FieldMask) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Paths) > 0 {
|
||||||
|
for _, s := range m.Paths {
|
||||||
|
l = len(s)
|
||||||
|
n += 1 + l + sovFieldMask(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovFieldMask(x uint64) (n int) {
|
||||||
|
return (math_bits.Len64(x|1) + 6) / 7
|
||||||
|
}
|
||||||
|
func sozFieldMask(x uint64) (n int) {
|
||||||
|
return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *FieldMask) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&FieldMask{`,
|
||||||
|
`Paths:` + fmt.Sprintf("%v", this.Paths) + `,`,
|
||||||
|
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringFieldMask(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *FieldMask) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: FieldMask: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthFieldMask
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthFieldMask
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex]))
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFieldMask(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthFieldMask
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipFieldMask(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
depth := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthFieldMask
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
case 3:
|
||||||
|
depth++
|
||||||
|
case 4:
|
||||||
|
if depth == 0 {
|
||||||
|
return 0, ErrUnexpectedEndOfGroupFieldMask
|
||||||
|
}
|
||||||
|
depth--
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthFieldMask
|
||||||
|
}
|
||||||
|
if depth == 0 {
|
||||||
|
return iNdEx, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow")
|
||||||
|
ErrUnexpectedEndOfGroupFieldMask = fmt.Errorf("proto: unexpected end of group")
|
||||||
|
)
|
|
@ -0,0 +1,34 @@
|
||||||
|
package types
|
||||||
|
|
||||||
|
func (m *Any) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Api) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Method) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Mixin) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Duration) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Empty) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *FieldMask) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *SourceContext) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Struct) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Value) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Value_NullValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Value_NumberValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Value_StringValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Value_BoolValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Value_StructValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Value_ListValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *ListValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Timestamp) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Type) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Field) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Enum) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *EnumValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Option) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *DoubleValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *FloatValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Int64Value) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *UInt64Value) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *Int32Value) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *UInt32Value) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *BoolValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *StringValue) ProtoSize() (n int) { return m.Size() }
|
||||||
|
func (m *BytesValue) ProtoSize() (n int) { return m.Size() }
|
|
@ -0,0 +1,524 @@
|
||||||
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
|
// source: google/protobuf/source_context.proto
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
bytes "bytes"
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/gogo/protobuf/proto"
|
||||||
|
io "io"
|
||||||
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
reflect "reflect"
|
||||||
|
strings "strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
// `SourceContext` represents information about the source of a
|
||||||
|
// protobuf element, like the file in which it is defined.
|
||||||
|
type SourceContext struct {
|
||||||
|
// The path-qualified name of the .proto file that contained the associated
|
||||||
|
// protobuf element. For example: `"google/protobuf/source_context.proto"`.
|
||||||
|
FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SourceContext) Reset() { *m = SourceContext{} }
|
||||||
|
func (*SourceContext) ProtoMessage() {}
|
||||||
|
func (*SourceContext) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_b686cdb126d509db, []int{0}
|
||||||
|
}
|
||||||
|
func (m *SourceContext) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *SourceContext) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_SourceContext.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *SourceContext) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *SourceContext) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_SourceContext.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_SourceContext proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *SourceContext) GetFileName() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.FileName
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*SourceContext) XXX_MessageName() string {
|
||||||
|
return "google.protobuf.SourceContext"
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptor_b686cdb126d509db = []byte{
|
||||||
|
// 212 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f,
|
||||||
|
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d,
|
||||||
|
0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43,
|
||||||
|
0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49,
|
||||||
|
0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
|
||||||
|
0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63,
|
||||||
|
0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9,
|
||||||
|
0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e,
|
||||||
|
0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39,
|
||||||
|
0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac,
|
||||||
|
0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43,
|
||||||
|
0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80,
|
||||||
|
0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1,
|
||||||
|
0xf9, 0x00, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *SourceContext) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*SourceContext)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(SourceContext)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if this.FileName != that1.FileName {
|
||||||
|
if this.FileName < that1.FileName {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *SourceContext) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
return this == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*SourceContext)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(SourceContext)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
return this == nil
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.FileName != that1.FileName {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *SourceContext) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 5)
|
||||||
|
s = append(s, "&types.SourceContext{")
|
||||||
|
s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n")
|
||||||
|
if this.XXX_unrecognized != nil {
|
||||||
|
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||||
|
}
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringSourceContext(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *SourceContext) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
i -= len(m.XXX_unrecognized)
|
||||||
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
if len(m.FileName) > 0 {
|
||||||
|
i -= len(m.FileName)
|
||||||
|
copy(dAtA[i:], m.FileName)
|
||||||
|
i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int {
|
||||||
|
offset -= sovSourceContext(v)
|
||||||
|
base := offset
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext {
|
||||||
|
this := &SourceContext{}
|
||||||
|
this.FileName = string(randStringSourceContext(r))
|
||||||
|
if !easy && r.Intn(10) != 0 {
|
||||||
|
this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2)
|
||||||
|
}
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
type randySourceContext interface {
|
||||||
|
Float32() float32
|
||||||
|
Float64() float64
|
||||||
|
Int63() int64
|
||||||
|
Int31() int32
|
||||||
|
Uint32() uint32
|
||||||
|
Intn(n int) int
|
||||||
|
}
|
||||||
|
|
||||||
|
func randUTF8RuneSourceContext(r randySourceContext) rune {
|
||||||
|
ru := r.Intn(62)
|
||||||
|
if ru < 10 {
|
||||||
|
return rune(ru + 48)
|
||||||
|
} else if ru < 36 {
|
||||||
|
return rune(ru + 55)
|
||||||
|
}
|
||||||
|
return rune(ru + 61)
|
||||||
|
}
|
||||||
|
func randStringSourceContext(r randySourceContext) string {
|
||||||
|
v1 := r.Intn(100)
|
||||||
|
tmps := make([]rune, v1)
|
||||||
|
for i := 0; i < v1; i++ {
|
||||||
|
tmps[i] = randUTF8RuneSourceContext(r)
|
||||||
|
}
|
||||||
|
return string(tmps)
|
||||||
|
}
|
||||||
|
func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) {
|
||||||
|
l := r.Intn(5)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
wire := r.Intn(4)
|
||||||
|
if wire == 3 {
|
||||||
|
wire = 5
|
||||||
|
}
|
||||||
|
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||||
|
dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire)
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte {
|
||||||
|
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||||
|
switch wire {
|
||||||
|
case 0:
|
||||||
|
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
|
||||||
|
v2 := r.Int63()
|
||||||
|
if r.Intn(2) == 0 {
|
||||||
|
v2 *= -1
|
||||||
|
}
|
||||||
|
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2))
|
||||||
|
case 1:
|
||||||
|
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
case 2:
|
||||||
|
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
|
||||||
|
ll := r.Intn(100)
|
||||||
|
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll))
|
||||||
|
for j := 0; j < ll; j++ {
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||||
|
v >>= 7
|
||||||
|
}
|
||||||
|
dAtA = append(dAtA, uint8(v))
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func (m *SourceContext) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.FileName)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovSourceContext(uint64(l))
|
||||||
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovSourceContext(x uint64) (n int) {
|
||||||
|
return (math_bits.Len64(x|1) + 6) / 7
|
||||||
|
}
|
||||||
|
func sozSourceContext(x uint64) (n int) {
|
||||||
|
return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *SourceContext) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&SourceContext{`,
|
||||||
|
`FileName:` + fmt.Sprintf("%v", this.FileName) + `,`,
|
||||||
|
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringSourceContext(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *SourceContext) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSourceContext
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: SourceContext: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowSourceContext
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthSourceContext
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthSourceContext
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.FileName = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipSourceContext(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthSourceContext
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipSourceContext(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
depth := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSourceContext
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSourceContext
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowSourceContext
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthSourceContext
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
case 3:
|
||||||
|
depth++
|
||||||
|
case 4:
|
||||||
|
if depth == 0 {
|
||||||
|
return 0, ErrUnexpectedEndOfGroupSourceContext
|
||||||
|
}
|
||||||
|
depth--
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthSourceContext
|
||||||
|
}
|
||||||
|
if depth == 0 {
|
||||||
|
return iNdEx, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow")
|
||||||
|
ErrUnexpectedEndOfGroupSourceContext = fmt.Errorf("proto: unexpected end of group")
|
||||||
|
)
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,130 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
// This file implements operations on google.protobuf.Timestamp.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Seconds field of the earliest valid Timestamp.
|
||||||
|
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||||
|
minValidSeconds = -62135596800
|
||||||
|
// Seconds field just after the latest valid Timestamp.
|
||||||
|
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||||
|
maxValidSeconds = 253402300800
|
||||||
|
)
|
||||||
|
|
||||||
|
// validateTimestamp determines whether a Timestamp is valid.
|
||||||
|
// A valid timestamp represents a time in the range
|
||||||
|
// [0001-01-01, 10000-01-01) and has a Nanos field
|
||||||
|
// in the range [0, 1e9).
|
||||||
|
//
|
||||||
|
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||||
|
// Otherwise, it returns an error that describes
|
||||||
|
// the problem.
|
||||||
|
//
|
||||||
|
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
|
||||||
|
func validateTimestamp(ts *Timestamp) error {
|
||||||
|
if ts == nil {
|
||||||
|
return errors.New("timestamp: nil Timestamp")
|
||||||
|
}
|
||||||
|
if ts.Seconds < minValidSeconds {
|
||||||
|
return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
|
||||||
|
}
|
||||||
|
if ts.Seconds >= maxValidSeconds {
|
||||||
|
return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
|
||||||
|
}
|
||||||
|
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||||
|
return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
|
||||||
|
// It returns an error if the argument is invalid.
|
||||||
|
//
|
||||||
|
// Unlike most Go functions, if Timestamp returns an error, the first return value
|
||||||
|
// is not the zero time.Time. Instead, it is the value obtained from the
|
||||||
|
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||||
|
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||||
|
// do map to valid time.Times.
|
||||||
|
//
|
||||||
|
// A nil Timestamp returns an error. The first return value in that case is
|
||||||
|
// undefined.
|
||||||
|
func TimestampFromProto(ts *Timestamp) (time.Time, error) {
|
||||||
|
// Don't return the zero value on error, because corresponds to a valid
|
||||||
|
// timestamp. Instead return whatever time.Unix gives us.
|
||||||
|
var t time.Time
|
||||||
|
if ts == nil {
|
||||||
|
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||||
|
} else {
|
||||||
|
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||||
|
}
|
||||||
|
return t, validateTimestamp(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
||||||
|
func TimestampNow() *Timestamp {
|
||||||
|
ts, err := TimestampProto(time.Now())
|
||||||
|
if err != nil {
|
||||||
|
panic("ptypes: time.Now() out of Timestamp range")
|
||||||
|
}
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||||
|
// It returns an error if the resulting Timestamp is invalid.
|
||||||
|
func TimestampProto(t time.Time) (*Timestamp, error) {
|
||||||
|
ts := &Timestamp{
|
||||||
|
Seconds: t.Unix(),
|
||||||
|
Nanos: int32(t.Nanosecond()),
|
||||||
|
}
|
||||||
|
if err := validateTimestamp(ts); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
|
||||||
|
// Timestamps, it returns an error message in parentheses.
|
||||||
|
func TimestampString(ts *Timestamp) string {
|
||||||
|
t, err := TimestampFromProto(ts)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("(%v)", err)
|
||||||
|
}
|
||||||
|
return t.Format(time.RFC3339Nano)
|
||||||
|
}
|
|
@ -0,0 +1,539 @@
|
||||||
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
|
// source: google/protobuf/timestamp.proto
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
bytes "bytes"
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/gogo/protobuf/proto"
|
||||||
|
io "io"
|
||||||
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
reflect "reflect"
|
||||||
|
strings "strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
// A Timestamp represents a point in time independent of any time zone or local
|
||||||
|
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||||
|
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||||
|
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||||
|
// Gregorian calendar backwards to year one.
|
||||||
|
//
|
||||||
|
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
||||||
|
// second table is needed for interpretation, using a [24-hour linear
|
||||||
|
// smear](https://developers.google.com/time/smear).
|
||||||
|
//
|
||||||
|
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
||||||
|
// restricting to that range, we ensure that we can convert to and from [RFC
|
||||||
|
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
||||||
|
//
|
||||||
|
// # Examples
|
||||||
|
//
|
||||||
|
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||||
|
//
|
||||||
|
// Timestamp timestamp;
|
||||||
|
// timestamp.set_seconds(time(NULL));
|
||||||
|
// timestamp.set_nanos(0);
|
||||||
|
//
|
||||||
|
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||||
|
//
|
||||||
|
// struct timeval tv;
|
||||||
|
// gettimeofday(&tv, NULL);
|
||||||
|
//
|
||||||
|
// Timestamp timestamp;
|
||||||
|
// timestamp.set_seconds(tv.tv_sec);
|
||||||
|
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||||
|
//
|
||||||
|
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||||
|
//
|
||||||
|
// FILETIME ft;
|
||||||
|
// GetSystemTimeAsFileTime(&ft);
|
||||||
|
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||||
|
//
|
||||||
|
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||||
|
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||||
|
// Timestamp timestamp;
|
||||||
|
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||||
|
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||||
|
//
|
||||||
|
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||||
|
//
|
||||||
|
// long millis = System.currentTimeMillis();
|
||||||
|
//
|
||||||
|
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||||
|
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Example 5: Compute Timestamp from current time in Python.
|
||||||
|
//
|
||||||
|
// timestamp = Timestamp()
|
||||||
|
// timestamp.GetCurrentTime()
|
||||||
|
//
|
||||||
|
// # JSON Mapping
|
||||||
|
//
|
||||||
|
// In JSON format, the Timestamp type is encoded as a string in the
|
||||||
|
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
||||||
|
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
||||||
|
// where {year} is always expressed using four digits while {month}, {day},
|
||||||
|
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||||
|
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||||
|
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||||
|
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||||
|
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||||
|
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||||
|
//
|
||||||
|
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||||
|
// 01:30 UTC on January 15, 2017.
|
||||||
|
//
|
||||||
|
// In JavaScript, one can convert a Date object to this format using the
|
||||||
|
// standard
|
||||||
|
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||||
|
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||||
|
// to this format using
|
||||||
|
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
||||||
|
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
||||||
|
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||||
|
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
||||||
|
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
type Timestamp struct {
|
||||||
|
// Represents seconds of UTC time since Unix epoch
|
||||||
|
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||||
|
// 9999-12-31T23:59:59Z inclusive.
|
||||||
|
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||||
|
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||||
|
// second values with fractions must still have non-negative nanos values
|
||||||
|
// that count forward in time. Must be from 0 to 999,999,999
|
||||||
|
// inclusive.
|
||||||
|
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||||
|
func (*Timestamp) ProtoMessage() {}
|
||||||
|
func (*Timestamp) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_292007bbfe81227e, []int{0}
|
||||||
|
}
|
||||||
|
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||||
|
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
if deterministic {
|
||||||
|
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
|
||||||
|
} else {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (m *Timestamp) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Timestamp.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Timestamp) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *Timestamp) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Timestamp.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Timestamp) GetSeconds() int64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Seconds
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Timestamp) GetNanos() int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Nanos
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Timestamp) XXX_MessageName() string {
|
||||||
|
return "google.protobuf.Timestamp"
|
||||||
|
}
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
|
||||||
|
|
||||||
|
var fileDescriptor_292007bbfe81227e = []byte{
|
||||||
|
// 212 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||||
|
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
|
||||||
|
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
|
||||||
|
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
|
||||||
|
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
|
||||||
|
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d,
|
||||||
|
0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3,
|
||||||
|
0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c,
|
||||||
|
0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1,
|
||||||
|
0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90,
|
||||||
|
0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88,
|
||||||
|
0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90,
|
||||||
|
0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd,
|
||||||
|
0xfa, 0x00, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Timestamp) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Timestamp)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Timestamp)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if this.Seconds != that1.Seconds {
|
||||||
|
if this.Seconds < that1.Seconds {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if this.Nanos != that1.Nanos {
|
||||||
|
if this.Nanos < that1.Nanos {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *Timestamp) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
return this == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Timestamp)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Timestamp)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
return this == nil
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Seconds != that1.Seconds {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Nanos != that1.Nanos {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *Timestamp) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 6)
|
||||||
|
s = append(s, "&types.Timestamp{")
|
||||||
|
s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
|
||||||
|
s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
|
||||||
|
if this.XXX_unrecognized != nil {
|
||||||
|
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||||
|
}
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringTimestamp(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *Timestamp) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
i -= len(m.XXX_unrecognized)
|
||||||
|
copy(dAtA[i:], m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
if m.Nanos != 0 {
|
||||||
|
i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
}
|
||||||
|
if m.Seconds != 0 {
|
||||||
|
i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int {
|
||||||
|
offset -= sovTimestamp(v)
|
||||||
|
base := offset
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
func (m *Timestamp) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Seconds != 0 {
|
||||||
|
n += 1 + sovTimestamp(uint64(m.Seconds))
|
||||||
|
}
|
||||||
|
if m.Nanos != 0 {
|
||||||
|
n += 1 + sovTimestamp(uint64(m.Nanos))
|
||||||
|
}
|
||||||
|
if m.XXX_unrecognized != nil {
|
||||||
|
n += len(m.XXX_unrecognized)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovTimestamp(x uint64) (n int) {
|
||||||
|
return (math_bits.Len64(x|1) + 6) / 7
|
||||||
|
}
|
||||||
|
func sozTimestamp(x uint64) (n int) {
|
||||||
|
return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (m *Timestamp) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
|
||||||
|
}
|
||||||
|
m.Seconds = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Seconds |= int64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
|
||||||
|
}
|
||||||
|
m.Nanos = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Nanos |= int32(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipTimestamp(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthTimestamp
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipTimestamp(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
depth := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthTimestamp
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
case 3:
|
||||||
|
depth++
|
||||||
|
case 4:
|
||||||
|
if depth == 0 {
|
||||||
|
return 0, ErrUnexpectedEndOfGroupTimestamp
|
||||||
|
}
|
||||||
|
depth--
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthTimestamp
|
||||||
|
}
|
||||||
|
if depth == 0 {
|
||||||
|
return iNdEx, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow")
|
||||||
|
ErrUnexpectedEndOfGroupTimestamp = fmt.Errorf("proto: unexpected end of group")
|
||||||
|
)
|
|
@ -0,0 +1,94 @@
|
||||||
|
// Protocol Buffers for Go with Gadgets
|
||||||
|
//
|
||||||
|
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||||
|
// http://github.com/gogo/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewPopulatedTimestamp(r interface {
|
||||||
|
Int63() int64
|
||||||
|
}, easy bool) *Timestamp {
|
||||||
|
this := &Timestamp{}
|
||||||
|
ns := int64(r.Int63())
|
||||||
|
this.Seconds = ns / 1e9
|
||||||
|
this.Nanos = int32(ns % 1e9)
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *Timestamp) String() string {
|
||||||
|
return TimestampString(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPopulatedStdTime(r interface {
|
||||||
|
Int63() int64
|
||||||
|
}, easy bool) *time.Time {
|
||||||
|
timestamp := NewPopulatedTimestamp(r, easy)
|
||||||
|
t, err := TimestampFromProto(timestamp)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &t
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdTime(t time.Time) int {
|
||||||
|
ts, err := TimestampProto(t)
|
||||||
|
if err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return ts.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdTimeMarshal(t time.Time) ([]byte, error) {
|
||||||
|
size := SizeOfStdTime(t)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdTimeMarshalTo(t, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdTimeMarshalTo(t time.Time, data []byte) (int, error) {
|
||||||
|
ts, err := TimestampProto(t)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return ts.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdTimeUnmarshal(t *time.Time, data []byte) error {
|
||||||
|
ts := &Timestamp{}
|
||||||
|
if err := ts.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tt, err := TimestampFromProto(ts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*t = tt
|
||||||
|
return nil
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,300 @@
|
||||||
|
// Protocol Buffers for Go with Gadgets
|
||||||
|
//
|
||||||
|
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
|
||||||
|
// http://github.com/gogo/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
func NewPopulatedStdDouble(r randyWrappers, easy bool) *float64 {
|
||||||
|
v := NewPopulatedDoubleValue(r, easy)
|
||||||
|
return &v.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdDouble(v float64) int {
|
||||||
|
pv := &DoubleValue{Value: v}
|
||||||
|
return pv.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdDoubleMarshal(v float64) ([]byte, error) {
|
||||||
|
size := SizeOfStdDouble(v)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdDoubleMarshalTo(v, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdDoubleMarshalTo(v float64, data []byte) (int, error) {
|
||||||
|
pv := &DoubleValue{Value: v}
|
||||||
|
return pv.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdDoubleUnmarshal(v *float64, data []byte) error {
|
||||||
|
pv := &DoubleValue{}
|
||||||
|
if err := pv.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*v = pv.Value
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func NewPopulatedStdFloat(r randyWrappers, easy bool) *float32 {
|
||||||
|
v := NewPopulatedFloatValue(r, easy)
|
||||||
|
return &v.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdFloat(v float32) int {
|
||||||
|
pv := &FloatValue{Value: v}
|
||||||
|
return pv.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdFloatMarshal(v float32) ([]byte, error) {
|
||||||
|
size := SizeOfStdFloat(v)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdFloatMarshalTo(v, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdFloatMarshalTo(v float32, data []byte) (int, error) {
|
||||||
|
pv := &FloatValue{Value: v}
|
||||||
|
return pv.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdFloatUnmarshal(v *float32, data []byte) error {
|
||||||
|
pv := &FloatValue{}
|
||||||
|
if err := pv.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*v = pv.Value
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func NewPopulatedStdInt64(r randyWrappers, easy bool) *int64 {
|
||||||
|
v := NewPopulatedInt64Value(r, easy)
|
||||||
|
return &v.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdInt64(v int64) int {
|
||||||
|
pv := &Int64Value{Value: v}
|
||||||
|
return pv.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdInt64Marshal(v int64) ([]byte, error) {
|
||||||
|
size := SizeOfStdInt64(v)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdInt64MarshalTo(v, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdInt64MarshalTo(v int64, data []byte) (int, error) {
|
||||||
|
pv := &Int64Value{Value: v}
|
||||||
|
return pv.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdInt64Unmarshal(v *int64, data []byte) error {
|
||||||
|
pv := &Int64Value{}
|
||||||
|
if err := pv.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*v = pv.Value
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func NewPopulatedStdUInt64(r randyWrappers, easy bool) *uint64 {
|
||||||
|
v := NewPopulatedUInt64Value(r, easy)
|
||||||
|
return &v.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdUInt64(v uint64) int {
|
||||||
|
pv := &UInt64Value{Value: v}
|
||||||
|
return pv.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdUInt64Marshal(v uint64) ([]byte, error) {
|
||||||
|
size := SizeOfStdUInt64(v)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdUInt64MarshalTo(v, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdUInt64MarshalTo(v uint64, data []byte) (int, error) {
|
||||||
|
pv := &UInt64Value{Value: v}
|
||||||
|
return pv.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdUInt64Unmarshal(v *uint64, data []byte) error {
|
||||||
|
pv := &UInt64Value{}
|
||||||
|
if err := pv.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*v = pv.Value
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func NewPopulatedStdInt32(r randyWrappers, easy bool) *int32 {
|
||||||
|
v := NewPopulatedInt32Value(r, easy)
|
||||||
|
return &v.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdInt32(v int32) int {
|
||||||
|
pv := &Int32Value{Value: v}
|
||||||
|
return pv.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdInt32Marshal(v int32) ([]byte, error) {
|
||||||
|
size := SizeOfStdInt32(v)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdInt32MarshalTo(v, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdInt32MarshalTo(v int32, data []byte) (int, error) {
|
||||||
|
pv := &Int32Value{Value: v}
|
||||||
|
return pv.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdInt32Unmarshal(v *int32, data []byte) error {
|
||||||
|
pv := &Int32Value{}
|
||||||
|
if err := pv.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*v = pv.Value
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func NewPopulatedStdUInt32(r randyWrappers, easy bool) *uint32 {
|
||||||
|
v := NewPopulatedUInt32Value(r, easy)
|
||||||
|
return &v.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdUInt32(v uint32) int {
|
||||||
|
pv := &UInt32Value{Value: v}
|
||||||
|
return pv.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdUInt32Marshal(v uint32) ([]byte, error) {
|
||||||
|
size := SizeOfStdUInt32(v)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdUInt32MarshalTo(v, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdUInt32MarshalTo(v uint32, data []byte) (int, error) {
|
||||||
|
pv := &UInt32Value{Value: v}
|
||||||
|
return pv.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdUInt32Unmarshal(v *uint32, data []byte) error {
|
||||||
|
pv := &UInt32Value{}
|
||||||
|
if err := pv.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*v = pv.Value
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func NewPopulatedStdBool(r randyWrappers, easy bool) *bool {
|
||||||
|
v := NewPopulatedBoolValue(r, easy)
|
||||||
|
return &v.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdBool(v bool) int {
|
||||||
|
pv := &BoolValue{Value: v}
|
||||||
|
return pv.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdBoolMarshal(v bool) ([]byte, error) {
|
||||||
|
size := SizeOfStdBool(v)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdBoolMarshalTo(v, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdBoolMarshalTo(v bool, data []byte) (int, error) {
|
||||||
|
pv := &BoolValue{Value: v}
|
||||||
|
return pv.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdBoolUnmarshal(v *bool, data []byte) error {
|
||||||
|
pv := &BoolValue{}
|
||||||
|
if err := pv.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*v = pv.Value
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func NewPopulatedStdString(r randyWrappers, easy bool) *string {
|
||||||
|
v := NewPopulatedStringValue(r, easy)
|
||||||
|
return &v.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdString(v string) int {
|
||||||
|
pv := &StringValue{Value: v}
|
||||||
|
return pv.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdStringMarshal(v string) ([]byte, error) {
|
||||||
|
size := SizeOfStdString(v)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdStringMarshalTo(v, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdStringMarshalTo(v string, data []byte) (int, error) {
|
||||||
|
pv := &StringValue{Value: v}
|
||||||
|
return pv.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdStringUnmarshal(v *string, data []byte) error {
|
||||||
|
pv := &StringValue{}
|
||||||
|
if err := pv.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*v = pv.Value
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func NewPopulatedStdBytes(r randyWrappers, easy bool) *[]byte {
|
||||||
|
v := NewPopulatedBytesValue(r, easy)
|
||||||
|
return &v.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdBytes(v []byte) int {
|
||||||
|
pv := &BytesValue{Value: v}
|
||||||
|
return pv.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdBytesMarshal(v []byte) ([]byte, error) {
|
||||||
|
size := SizeOfStdBytes(v)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdBytesMarshalTo(v, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdBytesMarshalTo(v []byte, data []byte) (int, error) {
|
||||||
|
pv := &BytesValue{Value: v}
|
||||||
|
return pv.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdBytesUnmarshal(v *[]byte, data []byte) error {
|
||||||
|
pv := &BytesValue{}
|
||||||
|
if err := pv.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*v = pv.Value
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,66 @@
|
||||||
|
# This file lists all individuals having contributed content to the repository.
|
||||||
|
# For how it is generated, see `scripts/generate-authors.sh`.
|
||||||
|
|
||||||
|
Aaron L. Xu <likexu@harmonycloud.cn>
|
||||||
|
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||||
|
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||||
|
Alexander Morozov <lk4d4@docker.com>
|
||||||
|
Alice Frosi <afrosi@de.ibm.com>
|
||||||
|
Allen Sun <allen.sun@daocloud.io>
|
||||||
|
Anda Xu <anda.xu@docker.com>
|
||||||
|
Anthony Sottile <asottile@umich.edu>
|
||||||
|
Arnaud Bailly <arnaud.oqube@gmail.com>
|
||||||
|
Bin Liu <liubin0329@gmail.com>
|
||||||
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
|
Daniel Nephin <dnephin@gmail.com>
|
||||||
|
Dave Chen <dave.chen@arm.com>
|
||||||
|
David Calavera <david.calavera@gmail.com>
|
||||||
|
Dennis Chen <dennis.chen@arm.com>
|
||||||
|
Derek McGowan <derek@mcgstyle.net>
|
||||||
|
Doug Davis <dug@us.ibm.com>
|
||||||
|
Edgar Lee <edgarl@netflix.com>
|
||||||
|
Eli Uriegas <eli.uriegas@docker.com>
|
||||||
|
f0 <f0@users.noreply.github.com>
|
||||||
|
Fernando Miguel <github@FernandoMiguel.net>
|
||||||
|
Hao Hu <hao.hu.fr@gmail.com>
|
||||||
|
Helen Xie <chenjg@harmonycloud.cn>
|
||||||
|
Himanshu Pandey <hpandey@pivotal.io>
|
||||||
|
Hiromu Nakamura <abctail30@gmail.com>
|
||||||
|
Ian Campbell <ijc@docker.com>
|
||||||
|
Iskander (Alex) Sharipov <quasilyte@gmail.com>
|
||||||
|
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
|
||||||
|
Jessica Frazelle <acidburn@microsoft.com>
|
||||||
|
John Howard <jhoward@microsoft.com>
|
||||||
|
Jonathan Stoppani <jonathan.stoppani@divio.com>
|
||||||
|
Justas Brazauskas <brazauskasjustas@gmail.com>
|
||||||
|
Justin Cormack <justin.cormack@docker.com>
|
||||||
|
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
||||||
|
Lajos Papp <lalyos@yahoo.com>
|
||||||
|
Matt Rickard <mrick@google.com>
|
||||||
|
Michael Crosby <crosbymichael@gmail.com>
|
||||||
|
Miyachi Katsuya <miyachi_katsuya@r.recruit.co.jp>
|
||||||
|
Nao YONASHIRO <yonashiro@r.recruit.co.jp>
|
||||||
|
Natasha Jarus <linuxmercedes@gmail.com>
|
||||||
|
Noel Georgi <18496730+frezbo@users.noreply.github.com>
|
||||||
|
Ondrej Fabry <ofabry@cisco.com>
|
||||||
|
Patrick Van Stee <patrick@vanstee.me>
|
||||||
|
Ri Xu <xuri.me@gmail.com>
|
||||||
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
|
Shev Yan <yandong_8212@163.com>
|
||||||
|
Simon Ferquel <simon.ferquel@docker.com>
|
||||||
|
Stefan Weil <sw@weilnetz.de>
|
||||||
|
Thomas Leonard <thomas.leonard@docker.com>
|
||||||
|
Thomas Shaw <tomwillfixit@users.noreply.github.com>
|
||||||
|
Tibor Vass <tibor@docker.com>
|
||||||
|
Tiffany Jernigan <tiffany.f.j@gmail.com>
|
||||||
|
Tino Rusch <tino.rusch@gmail.com>
|
||||||
|
Tobias Klauser <tklauser@distanz.ch>
|
||||||
|
Tomas Tomecek <ttomecek@redhat.com>
|
||||||
|
Tomohiro Kusumoto <zabio1192@gmail.com>
|
||||||
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
|
Vincent Demeester <vincent.demeester@docker.com>
|
||||||
|
Wei Fu <fuweid89@gmail.com>
|
||||||
|
Yong Tang <yong.tang.github@outlook.com>
|
||||||
|
Yuichiro Kaneko <spiketeika@gmail.com>
|
||||||
|
Ziv Tsarfati <digger18@gmail.com>
|
||||||
|
郑泽宇 <perhapszzy@sina.com>
|
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
46
vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go
generated
vendored
Normal file
46
vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
// Package command contains the set of Dockerfile commands.
|
||||||
|
package command
|
||||||
|
|
||||||
|
// Define constants for the command strings
|
||||||
|
const (
|
||||||
|
Add = "add"
|
||||||
|
Arg = "arg"
|
||||||
|
Cmd = "cmd"
|
||||||
|
Copy = "copy"
|
||||||
|
Entrypoint = "entrypoint"
|
||||||
|
Env = "env"
|
||||||
|
Expose = "expose"
|
||||||
|
From = "from"
|
||||||
|
Healthcheck = "healthcheck"
|
||||||
|
Label = "label"
|
||||||
|
Maintainer = "maintainer"
|
||||||
|
Onbuild = "onbuild"
|
||||||
|
Run = "run"
|
||||||
|
Shell = "shell"
|
||||||
|
StopSignal = "stopsignal"
|
||||||
|
User = "user"
|
||||||
|
Volume = "volume"
|
||||||
|
Workdir = "workdir"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Commands is list of all Dockerfile commands
|
||||||
|
var Commands = map[string]struct{}{
|
||||||
|
Add: {},
|
||||||
|
Arg: {},
|
||||||
|
Cmd: {},
|
||||||
|
Copy: {},
|
||||||
|
Entrypoint: {},
|
||||||
|
Env: {},
|
||||||
|
Expose: {},
|
||||||
|
From: {},
|
||||||
|
Healthcheck: {},
|
||||||
|
Label: {},
|
||||||
|
Maintainer: {},
|
||||||
|
Onbuild: {},
|
||||||
|
Run: {},
|
||||||
|
Shell: {},
|
||||||
|
StopSignal: {},
|
||||||
|
User: {},
|
||||||
|
Volume: {},
|
||||||
|
Workdir: {},
|
||||||
|
}
|
58
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go
generated
vendored
Normal file
58
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/moby/buildkit/util/stack"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrorLocation gives a location in source code that caused the error
|
||||||
|
type ErrorLocation struct {
|
||||||
|
Location []Range
|
||||||
|
error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps to the next error
|
||||||
|
func (e *ErrorLocation) Unwrap() error {
|
||||||
|
return e.error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Range is a code section between two positions
|
||||||
|
type Range struct {
|
||||||
|
Start Position
|
||||||
|
End Position
|
||||||
|
}
|
||||||
|
|
||||||
|
// Position is a point in source code
|
||||||
|
type Position struct {
|
||||||
|
Line int
|
||||||
|
Character int
|
||||||
|
}
|
||||||
|
|
||||||
|
func withLocation(err error, start, end int) error {
|
||||||
|
return WithLocation(err, toRanges(start, end))
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLocation extends an error with a source code location
|
||||||
|
func WithLocation(err error, location []Range) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var el *ErrorLocation
|
||||||
|
if errors.As(err, &el) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return stack.Enable(&ErrorLocation{
|
||||||
|
error: err,
|
||||||
|
Location: location,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func toRanges(start, end int) (r []Range) {
|
||||||
|
if end <= start {
|
||||||
|
end = start
|
||||||
|
}
|
||||||
|
for i := start; i <= end; i++ {
|
||||||
|
r = append(r, Range{Start: Position{Line: i}, End: Position{Line: i}})
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
369
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go
generated
vendored
Normal file
369
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go
generated
vendored
Normal file
|
@ -0,0 +1,369 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
// line parsers are dispatch calls that parse a single unit of text into a
|
||||||
|
// Node object which contains the whole statement. Dockerfiles have varied
|
||||||
|
// (but not usually unique, see ONBUILD for a unique example) parsing rules
|
||||||
|
// per-command, and these unify the processing in a way that makes it
|
||||||
|
// manageable.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
commandLabel = "LABEL"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ignore the current argument. This will still leave a command parsed, but
|
||||||
|
// will not incorporate the arguments into the ast.
|
||||||
|
func parseIgnore(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
return &Node{}, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// used for onbuild. Could potentially be used for anything that represents a
|
||||||
|
// statement with sub-statements.
|
||||||
|
//
|
||||||
|
// ONBUILD RUN foo bar -> (onbuild (run foo bar))
|
||||||
|
//
|
||||||
|
func parseSubCommand(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
if rest == "" {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
child, err := newNodeFromLine(rest, d, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Node{Children: []*Node{child}}, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// helper to parse words (i.e space delimited or quoted strings) in a statement.
|
||||||
|
// The quotes are preserved as part of this function and they are stripped later
|
||||||
|
// as part of processWords().
|
||||||
|
func parseWords(rest string, d *directives) []string {
|
||||||
|
const (
|
||||||
|
inSpaces = iota // looking for start of a word
|
||||||
|
inWord
|
||||||
|
inQuote
|
||||||
|
)
|
||||||
|
|
||||||
|
words := []string{}
|
||||||
|
phase := inSpaces
|
||||||
|
word := ""
|
||||||
|
quote := '\000'
|
||||||
|
blankOK := false
|
||||||
|
var ch rune
|
||||||
|
var chWidth int
|
||||||
|
|
||||||
|
for pos := 0; pos <= len(rest); pos += chWidth {
|
||||||
|
if pos != len(rest) {
|
||||||
|
ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
|
||||||
|
}
|
||||||
|
|
||||||
|
if phase == inSpaces { // Looking for start of word
|
||||||
|
if pos == len(rest) { // end of input
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if unicode.IsSpace(ch) { // skip spaces
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
phase = inWord // found it, fall through
|
||||||
|
}
|
||||||
|
if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
|
||||||
|
if blankOK || len(word) > 0 {
|
||||||
|
words = append(words, word)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if phase == inWord {
|
||||||
|
if unicode.IsSpace(ch) {
|
||||||
|
phase = inSpaces
|
||||||
|
if blankOK || len(word) > 0 {
|
||||||
|
words = append(words, word)
|
||||||
|
}
|
||||||
|
word = ""
|
||||||
|
blankOK = false
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if ch == '\'' || ch == '"' {
|
||||||
|
quote = ch
|
||||||
|
blankOK = true
|
||||||
|
phase = inQuote
|
||||||
|
}
|
||||||
|
if ch == d.escapeToken {
|
||||||
|
if pos+chWidth == len(rest) {
|
||||||
|
continue // just skip an escape token at end of line
|
||||||
|
}
|
||||||
|
// If we're not quoted and we see an escape token, then always just
|
||||||
|
// add the escape token plus the char to the word, even if the char
|
||||||
|
// is a quote.
|
||||||
|
word += string(ch)
|
||||||
|
pos += chWidth
|
||||||
|
ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
|
||||||
|
}
|
||||||
|
word += string(ch)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if phase == inQuote {
|
||||||
|
if ch == quote {
|
||||||
|
phase = inWord
|
||||||
|
}
|
||||||
|
// The escape token is special except for ' quotes - can't escape anything for '
|
||||||
|
if ch == d.escapeToken && quote != '\'' {
|
||||||
|
if pos+chWidth == len(rest) {
|
||||||
|
phase = inWord
|
||||||
|
continue // just skip the escape token at end
|
||||||
|
}
|
||||||
|
pos += chWidth
|
||||||
|
word += string(ch)
|
||||||
|
ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
|
||||||
|
}
|
||||||
|
word += string(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return words
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse environment like statements. Note that this does *not* handle
|
||||||
|
// variable interpolation, which will be handled in the evaluator.
|
||||||
|
func parseNameVal(rest string, key string, d *directives) (*Node, error) {
|
||||||
|
// This is kind of tricky because we need to support the old
|
||||||
|
// variant: KEY name value
|
||||||
|
// as well as the new one: KEY name=value ...
|
||||||
|
// The trigger to know which one is being used will be whether we hit
|
||||||
|
// a space or = first. space ==> old, "=" ==> new
|
||||||
|
|
||||||
|
words := parseWords(rest, d)
|
||||||
|
if len(words) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Old format (KEY name value)
|
||||||
|
if !strings.Contains(words[0], "=") {
|
||||||
|
parts := reWhitespace.Split(rest, 2)
|
||||||
|
if len(parts) < 2 {
|
||||||
|
return nil, fmt.Errorf(key + " must have two arguments")
|
||||||
|
}
|
||||||
|
return newKeyValueNode(parts[0], parts[1]), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var rootNode *Node
|
||||||
|
var prevNode *Node
|
||||||
|
for _, word := range words {
|
||||||
|
if !strings.Contains(word, "=") {
|
||||||
|
return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.SplitN(word, "=", 2)
|
||||||
|
node := newKeyValueNode(parts[0], parts[1])
|
||||||
|
rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return rootNode, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newKeyValueNode(key, value string) *Node {
|
||||||
|
return &Node{
|
||||||
|
Value: key,
|
||||||
|
Next: &Node{Value: value},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
|
||||||
|
if rootNode == nil {
|
||||||
|
rootNode = node
|
||||||
|
}
|
||||||
|
if prevNode != nil {
|
||||||
|
prevNode.Next = node
|
||||||
|
}
|
||||||
|
|
||||||
|
prevNode = node.Next
|
||||||
|
return rootNode, prevNode
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseEnv(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
node, err := parseNameVal(rest, "ENV", d)
|
||||||
|
return node, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseLabel(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
node, err := parseNameVal(rest, commandLabel, d)
|
||||||
|
return node, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// parses a statement containing one or more keyword definition(s) and/or
|
||||||
|
// value assignments, like `name1 name2= name3="" name4=value`.
|
||||||
|
// Note that this is a stricter format than the old format of assignment,
|
||||||
|
// allowed by parseNameVal(), in a way that this only allows assignment of the
|
||||||
|
// form `keyword=[<value>]` like `name2=`, `name3=""`, and `name4=value` above.
|
||||||
|
// In addition, a keyword definition alone is of the form `keyword` like `name1`
|
||||||
|
// above. And the assignments `name2=` and `name3=""` are equivalent and
|
||||||
|
// assign an empty value to the respective keywords.
|
||||||
|
func parseNameOrNameVal(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
words := parseWords(rest, d)
|
||||||
|
if len(words) == 0 {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
rootnode *Node
|
||||||
|
prevNode *Node
|
||||||
|
)
|
||||||
|
for i, word := range words {
|
||||||
|
node := &Node{}
|
||||||
|
node.Value = word
|
||||||
|
if i == 0 {
|
||||||
|
rootnode = node
|
||||||
|
} else {
|
||||||
|
prevNode.Next = node
|
||||||
|
}
|
||||||
|
prevNode = node
|
||||||
|
}
|
||||||
|
|
||||||
|
return rootnode, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parses a whitespace-delimited set of arguments. The result is effectively a
|
||||||
|
// linked list of string arguments.
|
||||||
|
func parseStringsWhitespaceDelimited(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
if rest == "" {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
node := &Node{}
|
||||||
|
rootnode := node
|
||||||
|
prevnode := node
|
||||||
|
for _, str := range reWhitespace.Split(rest, -1) { // use regexp
|
||||||
|
prevnode = node
|
||||||
|
node.Value = str
|
||||||
|
node.Next = &Node{}
|
||||||
|
node = node.Next
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX to get around regexp.Split *always* providing an empty string at the
|
||||||
|
// end due to how our loop is constructed, nil out the last node in the
|
||||||
|
// chain.
|
||||||
|
prevnode.Next = nil
|
||||||
|
|
||||||
|
return rootnode, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseString just wraps the string in quotes and returns a working node.
|
||||||
|
func parseString(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
if rest == "" {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
n := &Node{}
|
||||||
|
n.Value = rest
|
||||||
|
return n, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseJSON converts JSON arrays to an AST.
|
||||||
|
func parseJSON(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
|
||||||
|
if !strings.HasPrefix(rest, "[") {
|
||||||
|
return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
|
||||||
|
}
|
||||||
|
|
||||||
|
var myJSON []interface{}
|
||||||
|
if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var top, prev *Node
|
||||||
|
for _, str := range myJSON {
|
||||||
|
s, ok := str.(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, nil, errDockerfileNotStringArray
|
||||||
|
}
|
||||||
|
|
||||||
|
node := &Node{Value: s}
|
||||||
|
if prev == nil {
|
||||||
|
top = node
|
||||||
|
} else {
|
||||||
|
prev.Next = node
|
||||||
|
}
|
||||||
|
prev = node
|
||||||
|
}
|
||||||
|
|
||||||
|
return top, map[string]bool{"json": true}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseMaybeJSON determines if the argument appears to be a JSON array. If
|
||||||
|
// so, passes to parseJSON; if not, quotes the result and returns a single
|
||||||
|
// node.
|
||||||
|
func parseMaybeJSON(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
if rest == "" {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
node, attrs, err := parseJSON(rest, d)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
return node, attrs, nil
|
||||||
|
}
|
||||||
|
if err == errDockerfileNotStringArray {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
node = &Node{}
|
||||||
|
node.Value = rest
|
||||||
|
return node, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
|
||||||
|
// so, passes to parseJSON; if not, attempts to parse it as a whitespace
|
||||||
|
// delimited string.
|
||||||
|
func parseMaybeJSONToList(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
node, attrs, err := parseJSON(rest, d)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
return node, attrs, nil
|
||||||
|
}
|
||||||
|
if err == errDockerfileNotStringArray {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return parseStringsWhitespaceDelimited(rest, d)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
|
||||||
|
func parseHealthConfig(rest string, d *directives) (*Node, map[string]bool, error) {
|
||||||
|
// Find end of first argument
|
||||||
|
var sep int
|
||||||
|
for ; sep < len(rest); sep++ {
|
||||||
|
if unicode.IsSpace(rune(rest[sep])) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
next := sep
|
||||||
|
for ; next < len(rest); next++ {
|
||||||
|
if !unicode.IsSpace(rune(rest[next])) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if sep == 0 {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
typ := rest[:sep]
|
||||||
|
cmd, attrs, err := parseMaybeJSON(rest[next:], d)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Node{Value: typ, Next: cmd}, attrs, err
|
||||||
|
}
|
573
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
generated
vendored
Normal file
573
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
generated
vendored
Normal file
|
@ -0,0 +1,573 @@
|
||||||
|
// Package parser implements a parser and parse tree dumper for Dockerfiles.
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/moby/buildkit/frontend/dockerfile/command"
|
||||||
|
"github.com/moby/buildkit/frontend/dockerfile/shell"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Node is a structure used to represent a parse tree.
|
||||||
|
//
|
||||||
|
// In the node there are three fields, Value, Next, and Children. Value is the
|
||||||
|
// current token's string value. Next is always the next non-child token, and
|
||||||
|
// children contains all the children. Here's an example:
|
||||||
|
//
|
||||||
|
// (value next (child child-next child-next-next) next-next)
|
||||||
|
//
|
||||||
|
// This data structure is frankly pretty lousy for handling complex languages,
|
||||||
|
// but lucky for us the Dockerfile isn't very complicated. This structure
|
||||||
|
// works a little more effectively than a "proper" parse tree for our needs.
|
||||||
|
//
|
||||||
|
type Node struct {
|
||||||
|
Value string // actual content
|
||||||
|
Next *Node // the next item in the current sexp
|
||||||
|
Children []*Node // the children of this sexp
|
||||||
|
Heredocs []Heredoc // extra heredoc content attachments
|
||||||
|
Attributes map[string]bool // special attributes for this node
|
||||||
|
Original string // original line used before parsing
|
||||||
|
Flags []string // only top Node should have this set
|
||||||
|
StartLine int // the line in the original dockerfile where the node begins
|
||||||
|
EndLine int // the line in the original dockerfile where the node ends
|
||||||
|
PrevComment []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Location return the location of node in source code
|
||||||
|
func (node *Node) Location() []Range {
|
||||||
|
return toRanges(node.StartLine, node.EndLine)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump dumps the AST defined by `node` as a list of sexps.
|
||||||
|
// Returns a string suitable for printing.
|
||||||
|
func (node *Node) Dump() string {
|
||||||
|
str := ""
|
||||||
|
str += strings.ToLower(node.Value)
|
||||||
|
|
||||||
|
if len(node.Flags) > 0 {
|
||||||
|
str += fmt.Sprintf(" %q", node.Flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, n := range node.Children {
|
||||||
|
str += "(" + n.Dump() + ")\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
for n := node.Next; n != nil; n = n.Next {
|
||||||
|
if len(n.Children) > 0 {
|
||||||
|
str += " " + n.Dump()
|
||||||
|
} else {
|
||||||
|
str += " " + strconv.Quote(n.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.TrimSpace(str)
|
||||||
|
}
|
||||||
|
|
||||||
|
// lines records the start and end line numbers of the node in the
// original Dockerfile.
func (node *Node) lines(start, end int) {
	node.StartLine = start
	node.EndLine = end
}
|
||||||
|
|
||||||
|
func (node *Node) canContainHeredoc() bool {
|
||||||
|
// check for compound commands, like ONBUILD
|
||||||
|
if ok := heredocCompoundDirectives[strings.ToLower(node.Value)]; ok {
|
||||||
|
if node.Next != nil && len(node.Next.Children) > 0 {
|
||||||
|
node = node.Next.Children[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok := heredocDirectives[strings.ToLower(node.Value)]; !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if isJSON := node.Attributes["json"]; isJSON {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddChild adds a new child node, and updates line information: the child's
// own range is stamped, and the parent's range is widened to include it.
func (node *Node) AddChild(child *Node, startLine, endLine int) {
	child.lines(startLine, endLine)
	if node.StartLine < 0 {
		// First child seen: the parent's range begins here.
		node.StartLine = startLine
	}
	node.EndLine = endLine
	node.Children = append(node.Children, child)
}
|
||||||
|
|
||||||
|
// Heredoc describes a single heredoc attached to an instruction,
// e.g. `RUN <<EOF ... EOF`.
type Heredoc struct {
	Name           string // terminator word (e.g. "EOF")
	FileDescriptor uint   // optional fd digits before "<<" (0 when absent)
	Expand         bool   // env-var expansion applies (terminator was unquoted)
	Chomp          bool   // "<<-" form: leading tabs are stripped
	Content        string // body lines collected up to (not including) the terminator
}
|
||||||
|
|
||||||
|
var (
	// dispatch maps a lowercased instruction name to its argument parser;
	// it is populated in init.
	dispatch map[string]func(string, *directives) (*Node, map[string]bool, error)

	reWhitespace  = regexp.MustCompile(`[\t\v\f\r ]+`)                                // horizontal whitespace (excludes '\n')
	reDirectives  = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) // parser directive, e.g. "# escape=`"
	reComment     = regexp.MustCompile(`^#.*$`)                                       // whole-line comment
	reHeredoc     = regexp.MustCompile(`^(\d*)<<(-?)([^<]*)$`)                        // fd digits, chomp flag, terminator word
	reLeadingTabs = regexp.MustCompile(`(?m)^\t+`)                                    // per-line leading tabs (for "<<-")
)
|
||||||
|
|
||||||
|
// DefaultEscapeToken is the default escape (and line-continuation) token:
// a backslash.
const DefaultEscapeToken = '\\'
|
||||||
|
|
||||||
|
// validDirectives is the set of parser-directive names recognized in
// leading "# key=value" comment lines.
var validDirectives = map[string]struct{}{
	"escape": {},
	"syntax": {},
}
|
||||||
|
|
||||||
|
var (
	// heredocDirectives lists the directives allowed to contain heredocs
	// (RUN, COPY, ADD).
	heredocDirectives = map[string]bool{
		command.Add:  true,
		command.Copy: true,
		command.Run:  true,
	}

	// heredocCompoundDirectives lists directives allowed to contain
	// directives that themselves contain heredocs (e.g. ONBUILD RUN <<EOF).
	heredocCompoundDirectives = map[string]bool{
		command.Onbuild: true,
	}
)
|
||||||
|
|
||||||
|
// directive is the structure used during a build run to hold the state of
// parsing directives.
type directives struct {
	escapeToken           rune                // Current escape token
	lineContinuationRegex *regexp.Regexp      // Current line continuation regex
	done                  bool                // Whether we are done looking for directives
	seen                  map[string]struct{} // Which directives have already been seen (for duplicate detection)
}
|
||||||
|
|
||||||
|
// setEscapeToken sets the default token for escaping characters and as line-
|
||||||
|
// continuation token in a Dockerfile. Only ` (backtick) and \ (backslash) are
|
||||||
|
// allowed as token.
|
||||||
|
func (d *directives) setEscapeToken(s string) error {
|
||||||
|
if s != "`" && s != `\` {
|
||||||
|
return errors.Errorf("invalid escape token '%s' does not match ` or \\", s)
|
||||||
|
}
|
||||||
|
d.escapeToken = rune(s[0])
|
||||||
|
// The escape token is used both to escape characters in a line and as line
|
||||||
|
// continuation token. If it's the last non-whitespace token, it is used as
|
||||||
|
// line-continuation token, *unless* preceded by an escape-token.
|
||||||
|
//
|
||||||
|
// The second branch in the regular expression handles line-continuation
|
||||||
|
// tokens on their own line, which don't have any character preceding them.
|
||||||
|
//
|
||||||
|
// Due to Go lacking negative look-ahead matching, this regular expression
|
||||||
|
// does not currently handle a line-continuation token preceded by an *escaped*
|
||||||
|
// escape-token ("foo \\\").
|
||||||
|
d.lineContinuationRegex = regexp.MustCompile(`([^\` + s + `])\` + s + `[ \t]*$|^\` + s + `[ \t]*$`)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// possibleParserDirective looks for parser directives, eg '# escapeToken=<char>'.
|
||||||
|
// Parser directives must precede any builder instruction or other comments,
|
||||||
|
// and cannot be repeated.
|
||||||
|
func (d *directives) possibleParserDirective(line string) error {
|
||||||
|
if d.done {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
match := reDirectives.FindStringSubmatch(line)
|
||||||
|
if len(match) == 0 {
|
||||||
|
d.done = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
k := strings.ToLower(match[1])
|
||||||
|
_, ok := validDirectives[k]
|
||||||
|
if !ok {
|
||||||
|
d.done = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := d.seen[k]; ok {
|
||||||
|
return errors.Errorf("only one %s parser directive can be used", k)
|
||||||
|
}
|
||||||
|
d.seen[k] = struct{}{}
|
||||||
|
|
||||||
|
if k == "escape" {
|
||||||
|
return d.setEscapeToken(match[2])
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newDefaultDirectives returns a new directives structure with the default escapeToken token
func newDefaultDirectives() *directives {
	d := &directives{
		seen: map[string]struct{}{},
	}
	// Error intentionally ignored: DefaultEscapeToken ('\\') is always
	// accepted by setEscapeToken.
	d.setEscapeToken(string(DefaultEscapeToken))
	return d
}
|
||||||
|
|
||||||
|
func init() {
	// Dispatch Table. see line_parsers.go for the parse functions.
	// The command is parsed and mapped to the line parser. The line parser
	// receives the arguments but not the command, and returns an AST after
	// reformulating the arguments according to the rules in the parser
	// functions. Errors are propagated up by Parse() and the resulting AST can
	// be incorporated directly into the existing AST as a next.
	dispatch = map[string]func(string, *directives) (*Node, map[string]bool, error){
		command.Add:         parseMaybeJSONToList,
		command.Arg:         parseNameOrNameVal,
		command.Cmd:         parseMaybeJSON,
		command.Copy:        parseMaybeJSONToList,
		command.Entrypoint:  parseMaybeJSON,
		command.Env:         parseEnv,
		command.Expose:      parseStringsWhitespaceDelimited,
		command.From:        parseStringsWhitespaceDelimited,
		command.Healthcheck: parseHealthConfig,
		command.Label:       parseLabel,
		command.Maintainer:  parseString,
		command.Onbuild:     parseSubCommand,
		command.Run:         parseMaybeJSON,
		command.Shell:       parseMaybeJSON,
		command.StopSignal:  parseString,
		command.User:        parseString,
		command.Volume:      parseMaybeJSONToList,
		command.Workdir:     parseString,
	}
}
|
||||||
|
|
||||||
|
// newNodeFromLine splits the line into parts, and dispatches to a function
|
||||||
|
// based on the command and command arguments. A Node is created from the
|
||||||
|
// result of the dispatch.
|
||||||
|
func newNodeFromLine(line string, d *directives, comments []string) (*Node, error) {
|
||||||
|
cmd, flags, args, err := splitCommand(line)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fn := dispatch[strings.ToLower(cmd)]
|
||||||
|
// Ignore invalid Dockerfile instructions
|
||||||
|
if fn == nil {
|
||||||
|
fn = parseIgnore
|
||||||
|
}
|
||||||
|
next, attrs, err := fn(args, d)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Node{
|
||||||
|
Value: cmd,
|
||||||
|
Original: line,
|
||||||
|
Flags: flags,
|
||||||
|
Next: next,
|
||||||
|
Attributes: attrs,
|
||||||
|
PrevComment: comments,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Result is the result of parsing a Dockerfile
type Result struct {
	AST         *Node     // root node; each instruction is one of its Children
	EscapeToken rune      // escape token in effect (default '\\')
	Warnings    []Warning // non-fatal issues found while parsing
}
|
||||||
|
|
||||||
|
// Warning describes a non-fatal problem discovered during parsing.
type Warning struct {
	Short    string   // one-line summary, suitable for console output
	Detail   [][]byte // additional detail lines
	URL      string   // link with more information
	Location *Range   // where in the Dockerfile the issue occurred
}
|
||||||
|
|
||||||
|
// PrintWarnings to the writer
|
||||||
|
func (r *Result) PrintWarnings(out io.Writer) {
|
||||||
|
if len(r.Warnings) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, w := range r.Warnings {
|
||||||
|
fmt.Fprintf(out, "[WARNING]: %s\n", w.Short)
|
||||||
|
}
|
||||||
|
if len(r.Warnings) > 0 {
|
||||||
|
fmt.Fprintf(out, "[WARNING]: Empty continuation lines will become errors in a future release.\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse reads lines from a Reader, parses the lines into an AST and returns
// the AST and escape token
func Parse(rwc io.Reader) (*Result, error) {
	d := newDefaultDirectives()
	currentLine := 0
	// root collects one child Node per instruction; StartLine == -1 marks
	// "no instruction seen yet".
	root := &Node{StartLine: -1}
	scanner := bufio.NewScanner(rwc)
	scanner.Split(scanLines)
	warnings := []Warning{}
	var comments []string

	var err error
	for scanner.Scan() {
		bytesRead := scanner.Bytes()
		if currentLine == 0 {
			// First line, strip the byte-order-marker if present
			bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
		}
		if isComment(bytesRead) {
			// Accumulate comment text for attachment to the next
			// instruction; a blank comment line resets the buffer.
			comment := strings.TrimSpace(string(bytesRead[1:]))
			if comment == "" {
				comments = nil
			} else {
				comments = append(comments, comment)
			}
		}
		bytesRead, err = processLine(d, bytesRead, true)
		if err != nil {
			return nil, withLocation(err, currentLine, 0)
		}
		currentLine++

		startLine := currentLine
		line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
		if isEndOfLine && line == "" {
			continue
		}

		// Keep consuming physical lines while the logical line is continued
		// with the escape token.
		var hasEmptyContinuationLine bool
		for !isEndOfLine && scanner.Scan() {
			// NOTE: this err shadows the outer one; the inner result is
			// fully handled within this loop body.
			bytesRead, err := processLine(d, scanner.Bytes(), false)
			if err != nil {
				return nil, withLocation(err, currentLine, 0)
			}
			currentLine++

			if isComment(scanner.Bytes()) {
				// original line was a comment (processLine strips comments)
				continue
			}
			if isEmptyContinuationLine(bytesRead) {
				hasEmptyContinuationLine = true
				continue
			}

			continuationLine := string(bytesRead)
			continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
			line += continuationLine
		}

		if hasEmptyContinuationLine {
			warnings = append(warnings, Warning{
				Short:    "Empty continuation line found in: " + line,
				Detail:   [][]byte{[]byte("Empty continuation lines will become errors in a future release")},
				URL:      "https://github.com/moby/moby/pull/33719",
				Location: &Range{Start: Position{Line: currentLine}, End: Position{Line: currentLine}},
			})
		}

		child, err := newNodeFromLine(line, d, comments)
		if err != nil {
			return nil, withLocation(err, startLine, currentLine)
		}

		if child.canContainHeredoc() {
			heredocs, err := heredocsFromLine(line)
			if err != nil {
				return nil, withLocation(err, startLine, currentLine)
			}

			// Read each heredoc body, in declaration order, up to its
			// terminator line.
			for _, heredoc := range heredocs {
				terminator := []byte(heredoc.Name)
				terminated := false
				for scanner.Scan() {
					bytesRead := scanner.Bytes()
					currentLine++

					possibleTerminator := trimNewline(bytesRead)
					if heredoc.Chomp {
						// "<<-" form: leading tabs don't count when
						// matching the terminator.
						possibleTerminator = trimLeadingTabs(possibleTerminator)
					}
					if bytes.Equal(possibleTerminator, terminator) {
						terminated = true
						break
					}
					heredoc.Content += string(bytesRead)
				}
				if !terminated {
					return nil, withLocation(errors.New("unterminated heredoc"), startLine, currentLine)
				}

				child.Heredocs = append(child.Heredocs, heredoc)
			}
		}

		root.AddChild(child, startLine, currentLine)
		comments = nil
	}

	if root.StartLine < 0 {
		return nil, withLocation(errors.New("file with no instructions"), currentLine, 0)
	}

	return &Result{
		AST:         root,
		Warnings:    warnings,
		EscapeToken: d.escapeToken,
	}, withLocation(handleScannerError(scanner.Err()), currentLine, 0)
}
|
||||||
|
|
||||||
|
// heredocFromMatch extracts a heredoc from a possible heredoc regex match;
// match is expected to hold the submatches of reHeredoc (fd digits,
// optional "-", terminator word). It returns (nil, nil) when the input is
// not a valid heredoc.
func heredocFromMatch(match []string) (*Heredoc, error) {
	if len(match) == 0 {
		return nil, nil
	}

	// Error intentionally ignored: match[1] is all digits by construction
	// of reHeredoc, and an empty string parses as 0.
	fd, _ := strconv.ParseUint(match[1], 10, 0)
	chomp := match[2] == "-"
	rest := match[3]

	if len(rest) == 0 {
		return nil, nil
	}

	shlex := shell.NewLex('\\')
	shlex.SkipUnsetEnv = true

	// Attempt to parse both the heredoc both with *and* without quotes.
	// If there are quotes in one but not the other, then we know that some
	// part of the heredoc word is quoted, so we shouldn't expand the content.
	shlex.RawQuotes = false
	words, err := shlex.ProcessWords(rest, []string{})
	if err != nil {
		return nil, err
	}
	// quick sanity check that rest is a single word
	if len(words) != 1 {
		return nil, nil
	}

	shlex.RawQuotes = true
	wordsRaw, err := shlex.ProcessWords(rest, []string{})
	if err != nil {
		return nil, err
	}
	if len(wordsRaw) != len(words) {
		return nil, fmt.Errorf("internal lexing of heredoc produced inconsistent results: %s", rest)
	}

	word := words[0]
	wordQuoteCount := strings.Count(word, `'`) + strings.Count(word, `"`)
	wordRaw := wordsRaw[0]
	wordRawQuoteCount := strings.Count(wordRaw, `'`) + strings.Count(wordRaw, `"`)

	// Equal quote counts between the two parses mean no quoting was
	// consumed, so the heredoc body is subject to variable expansion.
	expand := wordQuoteCount == wordRawQuoteCount

	return &Heredoc{
		Name:           word,
		Expand:         expand,
		Chomp:          chomp,
		FileDescriptor: uint(fd),
	}, nil
}
|
||||||
|
|
||||||
|
// ParseHeredoc parses a heredoc marker (e.g. "<<EOF") from a single shell
// word, returning (nil, nil) when src is not a heredoc.
func ParseHeredoc(src string) (*Heredoc, error) {
	return heredocFromMatch(reHeredoc.FindStringSubmatch(src))
}
|
||||||
|
// MustParseHeredoc parses a heredoc marker from a single shell word.
// NOTE: contrary to the usual Must* convention it does not panic on a parse
// error; it returns nil instead, as it also does for a non-heredoc src.
func MustParseHeredoc(src string) *Heredoc {
	heredoc, _ := ParseHeredoc(src)
	return heredoc
}
|
||||||
|
|
||||||
|
func heredocsFromLine(line string) ([]Heredoc, error) {
|
||||||
|
shlex := shell.NewLex('\\')
|
||||||
|
shlex.RawQuotes = true
|
||||||
|
shlex.RawEscapes = true
|
||||||
|
shlex.SkipUnsetEnv = true
|
||||||
|
words, _ := shlex.ProcessWords(line, []string{})
|
||||||
|
|
||||||
|
var docs []Heredoc
|
||||||
|
for _, word := range words {
|
||||||
|
heredoc, err := ParseHeredoc(word)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if heredoc != nil {
|
||||||
|
docs = append(docs, *heredoc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return docs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChompHeredocContent strips leading tabs from every line of src,
// implementing the "<<-" heredoc form.
func ChompHeredocContent(src string) string {
	return reLeadingTabs.ReplaceAllString(src, "")
}
|
||||||
|
|
||||||
|
// trimComments reduces src to empty when it is a whole-line comment
// (reComment matches an entire "#..." line); any other input is returned
// unchanged.
func trimComments(src []byte) []byte {
	return reComment.ReplaceAll(src, []byte{})
}
|
||||||
|
|
||||||
|
// trimLeadingWhitespace returns src with all leading Unicode whitespace
// removed; trailing whitespace is preserved.
func trimLeadingWhitespace(src []byte) []byte {
	notSpace := func(r rune) bool { return !unicode.IsSpace(r) }
	if i := bytes.IndexFunc(src, notSpace); i >= 0 {
		return src[i:]
	}
	// All whitespace: return the empty tail.
	return src[len(src):]
}
|
||||||
|
// trimLeadingTabs returns src with any run of leading tab characters
// removed (used for "<<-" heredoc terminator matching).
func trimLeadingTabs(src []byte) []byte {
	n := 0
	for n < len(src) && src[n] == '\t' {
		n++
	}
	return src[n:]
}
|
||||||
|
// trimNewline returns src with any trailing '\r' and '\n' bytes removed
// (in any order/combination).
func trimNewline(src []byte) []byte {
	end := len(src)
	for end > 0 && (src[end-1] == '\n' || src[end-1] == '\r') {
		end--
	}
	return src[:end]
}
|
||||||
|
|
||||||
|
// isComment reports whether line is a comment line: after dropping the
// trailing newline and leading whitespace it starts with '#'.
func isComment(line []byte) bool {
	return reComment.Match(trimLeadingWhitespace(trimNewline(line)))
}
|
||||||
|
|
||||||
|
// isEmptyContinuationLine reports whether line is blank: nothing but
// whitespace and/or a trailing newline.
func isEmptyContinuationLine(line []byte) bool {
	return len(trimLeadingWhitespace(trimNewline(line))) == 0
}
|
||||||
|
|
||||||
|
// utf8bom is the UTF-8 byte-order mark, stripped from the first line of the
// Dockerfile if present.
var utf8bom = []byte{0xEF, 0xBB, 0xBF}
|
||||||
|
|
||||||
|
func trimContinuationCharacter(line string, d *directives) (string, bool) {
|
||||||
|
if d.lineContinuationRegex.MatchString(line) {
|
||||||
|
line = d.lineContinuationRegex.ReplaceAllString(line, "$1")
|
||||||
|
return line, false
|
||||||
|
}
|
||||||
|
return line, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
// to preserve whitespace on continuation lines. Why is that done?
//
// processLine normalizes a raw scanned line: it drops the trailing newline,
// optionally strips leading whitespace (done only for the first physical
// line of an instruction), removes a whole-line comment, and feeds the line
// to the parser-directive detector, returning that detector's error if any.
func processLine(d *directives, token []byte, stripLeftWhitespace bool) ([]byte, error) {
	token = trimNewline(token)
	if stripLeftWhitespace {
		token = trimLeadingWhitespace(token)
	}
	return trimComments(token), d.possibleParserDirective(string(token))
}
|
||||||
|
|
||||||
|
// scanLines is a bufio.SplitFunc similar to bufio.ScanLines, except that
// the line ending is preserved as part of each token.
func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
	switch nl := bytes.IndexByte(data, '\n'); {
	case atEOF && len(data) == 0:
		// No more input.
		return 0, nil, nil
	case nl >= 0:
		// Emit up to and including the newline.
		return nl + 1, data[:nl+1], nil
	case atEOF:
		// Final line without a trailing newline.
		return len(data), data, nil
	default:
		// Request more data before a full line is available.
		return 0, nil, nil
	}
}
|
||||||
|
|
||||||
|
func handleScannerError(err error) error {
|
||||||
|
switch err {
|
||||||
|
case bufio.ErrTooLong:
|
||||||
|
return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1)
|
||||||
|
default:
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
117
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go
generated
vendored
Normal file
117
vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go
generated
vendored
Normal file
|
@ -0,0 +1,117 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// splitCommand takes a single line of text and parses out the cmd and args,
|
||||||
|
// which are used for dispatching to more exact parsing functions.
|
||||||
|
func splitCommand(line string) (string, []string, string, error) {
|
||||||
|
var args string
|
||||||
|
var flags []string
|
||||||
|
|
||||||
|
// Make sure we get the same results irrespective of leading/trailing spaces
|
||||||
|
cmdline := reWhitespace.Split(strings.TrimSpace(line), 2)
|
||||||
|
|
||||||
|
if len(cmdline) == 2 {
|
||||||
|
var err error
|
||||||
|
args, flags, err = extractBuilderFlags(cmdline[1])
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return cmdline[0], flags, strings.TrimSpace(args), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractBuilderFlags parses any leading "--flag" builder flags from line,
// returning the remaining (unconsumed) part of the line and the flag words.
// It is a character-level state machine with three phases: between words
// (inSpaces), inside a bare word (inWord), and inside a quoted region
// (inQuote). Scanning stops at the first word that does not start with "--"
// (or at a lone "--"), and everything from there on is returned untouched.
func extractBuilderFlags(line string) (string, []string, error) {
	// Parses the BuilderFlags and returns the remaining part of the line

	const (
		inSpaces = iota // looking for start of a word
		inWord
		inQuote
	)

	words := []string{}
	phase := inSpaces
	word := ""
	quote := '\000'
	// blankOK records that quotes were seen, so an empty word (e.g. --x="")
	// is still appended.
	blankOK := false
	var ch rune

	// The loop runs one position past the end of line so that end-of-input
	// handling shares the same code path.
	for pos := 0; pos <= len(line); pos++ {
		if pos != len(line) {
			ch = rune(line[pos])
		}

		if phase == inSpaces { // Looking for start of word
			if pos == len(line) { // end of input
				break
			}
			if unicode.IsSpace(ch) { // skip spaces
				continue
			}

			// Only keep going if the next word starts with --
			if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
				return line[pos:], words, nil
			}

			phase = inWord // found something with "--", fall through
		}
		if (phase == inWord || phase == inQuote) && (pos == len(line)) {
			// End of input while inside a word: flush it (unless it is a
			// bare "--" separator or an empty non-quoted word).
			if word != "--" && (blankOK || len(word) > 0) {
				words = append(words, word)
			}
			break
		}
		if phase == inWord {
			if unicode.IsSpace(ch) {
				phase = inSpaces
				if word == "--" {
					// Bare "--" ends flag parsing; the rest is arguments.
					return line[pos:], words, nil
				}
				if blankOK || len(word) > 0 {
					words = append(words, word)
				}
				word = ""
				blankOK = false
				continue
			}
			if ch == '\'' || ch == '"' {
				quote = ch
				blankOK = true
				phase = inQuote
				continue
			}
			if ch == '\\' {
				if pos+1 == len(line) {
					continue // just skip \ at end
				}
				// Consume the escape and take the next character literally.
				pos++
				ch = rune(line[pos])
			}
			word += string(ch)
			continue
		}
		if phase == inQuote {
			if ch == quote {
				phase = inWord
				continue
			}
			if ch == '\\' {
				if pos+1 == len(line) {
					phase = inWord
					continue // just skip \ at end
				}
				pos++
				ch = rune(line[pos])
			}
			word += string(ch)
		}
	}

	return "", words, nil
}
|
238
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest
generated
vendored
Normal file
238
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest
generated
vendored
Normal file
|
@ -0,0 +1,238 @@
|
||||||
|
A|hello | hello
|
||||||
|
A|he'll'o | hello
|
||||||
|
A|he'llo | error
|
||||||
|
A|he\'llo | he'llo
|
||||||
|
A|he\\'llo | error
|
||||||
|
A|abc\tdef | abctdef
|
||||||
|
A|"abc\tdef" | abc\tdef
|
||||||
|
A|"abc\\tdef" | abc\tdef
|
||||||
|
A|'abc\tdef' | abc\tdef
|
||||||
|
A|hello\ | hello
|
||||||
|
A|hello\\ | hello\
|
||||||
|
A|"hello | error
|
||||||
|
A|"hello\" | error
|
||||||
|
A|"hel'lo" | hel'lo
|
||||||
|
A|'hello | error
|
||||||
|
A|'hello\' | hello\
|
||||||
|
A|'hello\there' | hello\there
|
||||||
|
A|'hello\\there' | hello\\there
|
||||||
|
A|"''" | ''
|
||||||
|
A|$. | $.
|
||||||
|
A|he$1x | hex
|
||||||
|
A|he$.x | he$.x
|
||||||
|
# Next one is different on Windows as $pwd==$PWD
|
||||||
|
U|he$pwd. | he.
|
||||||
|
W|he$pwd. | he/home.
|
||||||
|
A|he$PWD | he/home
|
||||||
|
A|he\$PWD | he$PWD
|
||||||
|
A|he\\$PWD | he\/home
|
||||||
|
A|"he\$PWD" | he$PWD
|
||||||
|
A|"he\\$PWD" | he\/home
|
||||||
|
A|\${} | ${}
|
||||||
|
A|\${}aaa | ${}aaa
|
||||||
|
A|he\${} | he${}
|
||||||
|
A|he\${}xx | he${}xx
|
||||||
|
A|${} | error
|
||||||
|
A|${}aaa | error
|
||||||
|
A|he${} | error
|
||||||
|
A|he${}xx | error
|
||||||
|
A|he${hi} | he
|
||||||
|
A|he${hi}xx | hexx
|
||||||
|
A|he${PWD} | he/home
|
||||||
|
A|he${.} | error
|
||||||
|
A|he${XXX:-000}xx | he000xx
|
||||||
|
A|he${PWD:-000}xx | he/homexx
|
||||||
|
A|he${XXX:-$PWD}xx | he/homexx
|
||||||
|
A|he${XXX:-${PWD:-yyy}}xx | he/homexx
|
||||||
|
A|he${XXX:-${YYY:-yyy}}xx | heyyyxx
|
||||||
|
A|he${XXX:YYY} | error
|
||||||
|
A|he${XXX?} | error
|
||||||
|
A|he${XXX:?} | error
|
||||||
|
A|he${PWD?} | he/home
|
||||||
|
A|he${PWD:?} | he/home
|
||||||
|
A|he${NULL?} | he
|
||||||
|
A|he${NULL:?} | error
|
||||||
|
A|he${XXX:+${PWD}}xx | hexx
|
||||||
|
A|he${PWD:+${XXX}}xx | hexx
|
||||||
|
A|he${PWD:+${SHELL}}xx | hebashxx
|
||||||
|
A|he${XXX:+000}xx | hexx
|
||||||
|
A|he${PWD:+000}xx | he000xx
|
||||||
|
A|'he${XX}' | he${XX}
|
||||||
|
A|"he${PWD}" | he/home
|
||||||
|
A|"he'$PWD'" | he'/home'
|
||||||
|
A|"$PWD" | /home
|
||||||
|
A|'$PWD' | $PWD
|
||||||
|
A|'\$PWD' | \$PWD
|
||||||
|
A|'"hello"' | "hello"
|
||||||
|
A|he\$PWD | he$PWD
|
||||||
|
A|"he\$PWD" | he$PWD
|
||||||
|
A|'he\$PWD' | he\$PWD
|
||||||
|
A|he${PWD | error
|
||||||
|
A|he${PWD:=000}xx | error
|
||||||
|
A|he${PWD:+${PWD}:}xx | he/home:xx
|
||||||
|
A|he${XXX:-\$PWD:}xx | he$PWD:xx
|
||||||
|
A|he${XXX:-\${PWD}z}xx | he${PWDz}xx
|
||||||
|
A|안녕하세요 | 안녕하세요
|
||||||
|
A|안'녕'하세요 | 안녕하세요
|
||||||
|
A|안'녕하세요 | error
|
||||||
|
A|안녕\'하세요 | 안녕'하세요
|
||||||
|
A|안\\'녕하세요 | error
|
||||||
|
A|안녕\t하세요 | 안녕t하세요
|
||||||
|
A|"안녕\t하세요" | 안녕\t하세요
|
||||||
|
A|'안녕\t하세요 | error
|
||||||
|
A|안녕하세요\ | 안녕하세요
|
||||||
|
A|안녕하세요\\ | 안녕하세요\
|
||||||
|
A|"안녕하세요 | error
|
||||||
|
A|"안녕하세요\" | error
|
||||||
|
A|"안녕'하세요" | 안녕'하세요
|
||||||
|
A|'안녕하세요 | error
|
||||||
|
A|'안녕하세요\' | 안녕하세요\
|
||||||
|
A|안녕$1x | 안녕x
|
||||||
|
A|안녕$.x | 안녕$.x
|
||||||
|
# Next one is different on Windows as $pwd==$PWD
|
||||||
|
U|안녕$pwd. | 안녕.
|
||||||
|
W|안녕$pwd. | 안녕/home.
|
||||||
|
A|안녕$PWD | 안녕/home
|
||||||
|
A|안녕\$PWD | 안녕$PWD
|
||||||
|
A|안녕\\$PWD | 안녕\/home
|
||||||
|
A|안녕\${} | 안녕${}
|
||||||
|
A|안녕\${}xx | 안녕${}xx
|
||||||
|
A|안녕${} | error
|
||||||
|
A|안녕${}xx | error
|
||||||
|
A|안녕${hi} | 안녕
|
||||||
|
A|안녕${hi}xx | 안녕xx
|
||||||
|
A|안녕${PWD} | 안녕/home
|
||||||
|
A|안녕${.} | error
|
||||||
|
A|안녕${XXX:-000}xx | 안녕000xx
|
||||||
|
A|안녕${PWD:-000}xx | 안녕/homexx
|
||||||
|
A|안녕${XXX:-$PWD}xx | 안녕/homexx
|
||||||
|
A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx
|
||||||
|
A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx
|
||||||
|
A|안녕${XXX:YYY} | error
|
||||||
|
A|안녕${XXX:+${PWD}}xx | 안녕xx
|
||||||
|
A|안녕${PWD:+${XXX}}xx | 안녕xx
|
||||||
|
A|안녕${PWD:+${SHELL}}xx | 안녕bashxx
|
||||||
|
A|안녕${XXX:+000}xx | 안녕xx
|
||||||
|
A|안녕${PWD:+000}xx | 안녕000xx
|
||||||
|
A|'안녕${XX}' | 안녕${XX}
|
||||||
|
A|"안녕${PWD}" | 안녕/home
|
||||||
|
A|"안녕'$PWD'" | 안녕'/home'
|
||||||
|
A|'"안녕"' | "안녕"
|
||||||
|
A|안녕\$PWD | 안녕$PWD
|
||||||
|
A|"안녕\$PWD" | 안녕$PWD
|
||||||
|
A|'안녕\$PWD' | 안녕\$PWD
|
||||||
|
A|안녕${PWD | error
|
||||||
|
A|안녕${PWD:=000}xx | error
|
||||||
|
A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx
|
||||||
|
A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx
|
||||||
|
A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx
|
||||||
|
A|$KOREAN | 한국어
|
||||||
|
A|안녕$KOREAN | 안녕한국어
|
||||||
|
A|${{aaa} | error
|
||||||
|
A|${aaa}} | }
|
||||||
|
A|${aaa | error
|
||||||
|
A|${{aaa:-bbb} | error
|
||||||
|
A|${aaa:-bbb}} | bbb}
|
||||||
|
A|${aaa:-bbb | error
|
||||||
|
A|${aaa:-bbb} | bbb
|
||||||
|
A|${aaa:-${bbb:-ccc}} | ccc
|
||||||
|
A|${aaa:-bbb ${foo} | error
|
||||||
|
A|${aaa:-bbb {foo} | bbb {foo
|
||||||
|
A|${:} | error
|
||||||
|
A|${:-bbb} | error
|
||||||
|
A|${:+bbb} | error
|
||||||
|
|
||||||
|
# Positional parameters won't be set:
|
||||||
|
# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_01
|
||||||
|
A|$1 |
|
||||||
|
A|${1} |
|
||||||
|
A|${1:+bbb} |
|
||||||
|
A|${1:-bbb} | bbb
|
||||||
|
A|$2 |
|
||||||
|
A|${2} |
|
||||||
|
A|${2:+bbb} |
|
||||||
|
A|${2:-bbb} | bbb
|
||||||
|
A|$3 |
|
||||||
|
A|${3} |
|
||||||
|
A|${3:+bbb} |
|
||||||
|
A|${3:-bbb} | bbb
|
||||||
|
A|$4 |
|
||||||
|
A|${4} |
|
||||||
|
A|${4:+bbb} |
|
||||||
|
A|${4:-bbb} | bbb
|
||||||
|
A|$5 |
|
||||||
|
A|${5} |
|
||||||
|
A|${5:+bbb} |
|
||||||
|
A|${5:-bbb} | bbb
|
||||||
|
A|$6 |
|
||||||
|
A|${6} |
|
||||||
|
A|${6:+bbb} |
|
||||||
|
A|${6:-bbb} | bbb
|
||||||
|
A|$7 |
|
||||||
|
A|${7} |
|
||||||
|
A|${7:+bbb} |
|
||||||
|
A|${7:-bbb} | bbb
|
||||||
|
A|$8 |
|
||||||
|
A|${8} |
|
||||||
|
A|${8:+bbb} |
|
||||||
|
A|${8:-bbb} | bbb
|
||||||
|
A|$9 |
|
||||||
|
A|${9} |
|
||||||
|
A|${9:+bbb} |
|
||||||
|
A|${9:-bbb} | bbb
|
||||||
|
A|$999 |
|
||||||
|
A|${999} |
|
||||||
|
A|${999:+bbb} |
|
||||||
|
A|${999:-bbb} | bbb
|
||||||
|
A|$999aaa | aaa
|
||||||
|
A|${999}aaa | aaa
|
||||||
|
A|${999:+bbb}aaa | aaa
|
||||||
|
A|${999:-bbb}aaa | bbbaaa
|
||||||
|
A|$001 |
|
||||||
|
A|${001} |
|
||||||
|
A|${001:+bbb} |
|
||||||
|
A|${001:-bbb} | bbb
|
||||||
|
A|$001aaa | aaa
|
||||||
|
A|${001}aaa | aaa
|
||||||
|
A|${001:+bbb}aaa | aaa
|
||||||
|
A|${001:-bbb}aaa | bbbaaa
|
||||||
|
|
||||||
|
# Special parameters won't be set in the Dockerfile:
|
||||||
|
# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
|
||||||
|
A|$@ |
|
||||||
|
A|${@} |
|
||||||
|
A|${@:+bbb} |
|
||||||
|
A|${@:-bbb} | bbb
|
||||||
|
A|$@@@ | @@
|
||||||
|
A|$@aaa | aaa
|
||||||
|
A|${@}aaa | aaa
|
||||||
|
A|${@:+bbb}aaa | aaa
|
||||||
|
A|${@:-bbb}aaa | bbbaaa
|
||||||
|
A|$* |
|
||||||
|
A|${*} |
|
||||||
|
A|${*:+bbb} |
|
||||||
|
A|${*:-bbb} | bbb
|
||||||
|
A|$# |
|
||||||
|
A|${#} |
|
||||||
|
A|${#:+bbb} |
|
||||||
|
A|${#:-bbb} | bbb
|
||||||
|
A|$? |
|
||||||
|
A|${?} |
|
||||||
|
A|${?:+bbb} |
|
||||||
|
A|${?:-bbb} | bbb
|
||||||
|
A|$- |
|
||||||
|
A|${-} |
|
||||||
|
A|${-:+bbb} |
|
||||||
|
A|${-:-bbb} | bbb
|
||||||
|
A|$$ |
|
||||||
|
A|${$} |
|
||||||
|
A|${$:+bbb} |
|
||||||
|
A|${$:-bbb} | bbb
|
||||||
|
A|$! |
|
||||||
|
A|${!} |
|
||||||
|
A|${!:+bbb} |
|
||||||
|
A|${!:-bbb} | bbb
|
||||||
|
A|$0 |
|
||||||
|
A|${0} |
|
||||||
|
A|${0:+bbb} |
|
||||||
|
A|${0:-bbb} | bbb
|
11
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go
generated
vendored
Normal file
11
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
//go:build !windows
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package shell
|
||||||
|
|
||||||
|
// EqualEnvKeys compares two environment-variable names and reports whether
// they refer to the same variable.
// On Unix this comparison is case sensitive.
// On Windows this comparison is case insensitive.
func EqualEnvKeys(from, to string) bool {
	return from == to
}
|
10
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go
generated
vendored
Normal file
10
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
package shell
|
||||||
|
|
||||||
|
import "strings"
|
||||||
|
|
||||||
|
// EqualEnvKeys compares two environment-variable names and reports whether
// they refer to the same variable.
// On Unix this comparison is case sensitive.
// On Windows this comparison is case insensitive.
func EqualEnvKeys(from, to string) bool {
	// strings.EqualFold performs the case-insensitive comparison without
	// allocating the two uppercased copies that
	// strings.ToUpper(from) == strings.ToUpper(to) did.
	return strings.EqualFold(from, to)
}
|
|
@ -0,0 +1,499 @@
|
||||||
|
package shell
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"text/scanner"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Lex performs shell word splitting and variable expansion.
//
// Lex takes a string and an array of env variables and
// process all quotes (" and ') as well as $xxx and ${xxx} env variable
// tokens. Tries to mimic bash shell process.
// It doesn't support all flavors of ${xx:...} formats but new ones can
// be added by adding code to the "special ${} format processing" section
type Lex struct {
	escapeToken rune // escape character, set by NewLex

	// The flags below tune word processing; their semantics are implemented
	// by the lexer's process step (not shown here). RawQuotes/RawEscapes
	// preserve quote/escape characters in the produced words rather than
	// consuming them; SkipProcessQuotes and SkipUnsetEnv presumably disable
	// quote handling and unset-variable expansion respectively — confirm
	// against the process implementation.
	RawQuotes         bool
	RawEscapes        bool
	SkipProcessQuotes bool
	SkipUnsetEnv      bool
}
|
||||||
|
|
||||||
|
// NewLex creates a new Lex which uses escapeToken to escape quotes.
func NewLex(escapeToken rune) *Lex {
	return &Lex{escapeToken: escapeToken}
}
|
||||||
|
|
||||||
|
// ProcessWord will use the 'env' list of environment variables,
|
||||||
|
// and replace any env var references in 'word'.
|
||||||
|
func (s *Lex) ProcessWord(word string, env []string) (string, error) {
|
||||||
|
word, _, err := s.process(word, BuildEnvs(env))
|
||||||
|
return word, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessWords will use the 'env' list of environment variables,
|
||||||
|
// and replace any env var references in 'word' then it will also
|
||||||
|
// return a slice of strings which represents the 'word'
|
||||||
|
// split up based on spaces - taking into account quotes. Note that
|
||||||
|
// this splitting is done **after** the env var substitutions are done.
|
||||||
|
// Note, each one is trimmed to remove leading and trailing spaces (unless
|
||||||
|
// they are quoted", but ProcessWord retains spaces between words.
|
||||||
|
func (s *Lex) ProcessWords(word string, env []string) ([]string, error) {
|
||||||
|
_, words, err := s.process(word, BuildEnvs(env))
|
||||||
|
return words, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessWordWithMap will use the 'env' list of environment variables,
|
||||||
|
// and replace any env var references in 'word'.
|
||||||
|
func (s *Lex) ProcessWordWithMap(word string, env map[string]string) (string, error) {
|
||||||
|
word, _, err := s.process(word, env)
|
||||||
|
return word, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessWordWithMatches will use the 'env' list of environment variables,
|
||||||
|
// replace any env var references in 'word' and return the env that were used.
|
||||||
|
func (s *Lex) ProcessWordWithMatches(word string, env map[string]string) (string, map[string]struct{}, error) {
|
||||||
|
sw := s.init(word, env)
|
||||||
|
word, _, err := sw.process(word)
|
||||||
|
return word, sw.matches, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Lex) ProcessWordsWithMap(word string, env map[string]string) ([]string, error) {
|
||||||
|
_, words, err := s.process(word, env)
|
||||||
|
return words, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Lex) init(word string, env map[string]string) *shellWord {
|
||||||
|
sw := &shellWord{
|
||||||
|
envs: env,
|
||||||
|
escapeToken: s.escapeToken,
|
||||||
|
skipUnsetEnv: s.SkipUnsetEnv,
|
||||||
|
skipProcessQuotes: s.SkipProcessQuotes,
|
||||||
|
rawQuotes: s.RawQuotes,
|
||||||
|
rawEscapes: s.RawEscapes,
|
||||||
|
matches: make(map[string]struct{}),
|
||||||
|
}
|
||||||
|
sw.scanner.Init(strings.NewReader(word))
|
||||||
|
return sw
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Lex) process(word string, env map[string]string) (string, []string, error) {
|
||||||
|
sw := s.init(word, env)
|
||||||
|
return sw.process(word)
|
||||||
|
}
|
||||||
|
|
||||||
|
type shellWord struct {
|
||||||
|
scanner scanner.Scanner
|
||||||
|
envs map[string]string
|
||||||
|
escapeToken rune
|
||||||
|
rawQuotes bool
|
||||||
|
rawEscapes bool
|
||||||
|
skipUnsetEnv bool
|
||||||
|
skipProcessQuotes bool
|
||||||
|
matches map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *shellWord) process(source string) (string, []string, error) {
|
||||||
|
word, words, err := sw.processStopOn(scanner.EOF)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.Wrapf(err, "failed to process %q", source)
|
||||||
|
}
|
||||||
|
return word, words, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type wordsStruct struct {
|
||||||
|
word string
|
||||||
|
words []string
|
||||||
|
inWord bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *wordsStruct) addChar(ch rune) {
|
||||||
|
if unicode.IsSpace(ch) && w.inWord {
|
||||||
|
if len(w.word) != 0 {
|
||||||
|
w.words = append(w.words, w.word)
|
||||||
|
w.word = ""
|
||||||
|
w.inWord = false
|
||||||
|
}
|
||||||
|
} else if !unicode.IsSpace(ch) {
|
||||||
|
w.addRawChar(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *wordsStruct) addRawChar(ch rune) {
|
||||||
|
w.word += string(ch)
|
||||||
|
w.inWord = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *wordsStruct) addString(str string) {
|
||||||
|
for _, ch := range str {
|
||||||
|
w.addChar(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *wordsStruct) addRawString(str string) {
|
||||||
|
w.word += str
|
||||||
|
w.inWord = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *wordsStruct) getWords() []string {
|
||||||
|
if len(w.word) > 0 {
|
||||||
|
w.words = append(w.words, w.word)
|
||||||
|
|
||||||
|
// Just in case we're called again by mistake
|
||||||
|
w.word = ""
|
||||||
|
w.inWord = false
|
||||||
|
}
|
||||||
|
return w.words
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process the word, starting at 'pos', and stop when we get to the
|
||||||
|
// end of the word or the 'stopChar' character
|
||||||
|
func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) {
|
||||||
|
var result bytes.Buffer
|
||||||
|
var words wordsStruct
|
||||||
|
|
||||||
|
var charFuncMapping = map[rune]func() (string, error){
|
||||||
|
'$': sw.processDollar,
|
||||||
|
}
|
||||||
|
if !sw.skipProcessQuotes {
|
||||||
|
charFuncMapping['\''] = sw.processSingleQuote
|
||||||
|
charFuncMapping['"'] = sw.processDoubleQuote
|
||||||
|
}
|
||||||
|
|
||||||
|
for sw.scanner.Peek() != scanner.EOF {
|
||||||
|
ch := sw.scanner.Peek()
|
||||||
|
|
||||||
|
if stopChar != scanner.EOF && ch == stopChar {
|
||||||
|
sw.scanner.Next()
|
||||||
|
return result.String(), words.getWords(), nil
|
||||||
|
}
|
||||||
|
if fn, ok := charFuncMapping[ch]; ok {
|
||||||
|
// Call special processing func for certain chars
|
||||||
|
tmp, err := fn()
|
||||||
|
if err != nil {
|
||||||
|
return "", []string{}, err
|
||||||
|
}
|
||||||
|
result.WriteString(tmp)
|
||||||
|
|
||||||
|
if ch == rune('$') {
|
||||||
|
words.addString(tmp)
|
||||||
|
} else {
|
||||||
|
words.addRawString(tmp)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Not special, just add it to the result
|
||||||
|
ch = sw.scanner.Next()
|
||||||
|
|
||||||
|
if ch == sw.escapeToken {
|
||||||
|
if sw.rawEscapes {
|
||||||
|
words.addRawChar(ch)
|
||||||
|
result.WriteRune(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// '\' (default escape token, but ` allowed) escapes, except end of line
|
||||||
|
ch = sw.scanner.Next()
|
||||||
|
|
||||||
|
if ch == scanner.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
words.addRawChar(ch)
|
||||||
|
} else {
|
||||||
|
words.addChar(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
result.WriteRune(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if stopChar != scanner.EOF {
|
||||||
|
return "", []string{}, errors.Errorf("unexpected end of statement while looking for matching %s", string(stopChar))
|
||||||
|
}
|
||||||
|
return result.String(), words.getWords(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *shellWord) processSingleQuote() (string, error) {
|
||||||
|
// All chars between single quotes are taken as-is
|
||||||
|
// Note, you can't escape '
|
||||||
|
//
|
||||||
|
// From the "sh" man page:
|
||||||
|
// Single Quotes
|
||||||
|
// Enclosing characters in single quotes preserves the literal meaning of
|
||||||
|
// all the characters (except single quotes, making it impossible to put
|
||||||
|
// single-quotes in a single-quoted string).
|
||||||
|
|
||||||
|
var result bytes.Buffer
|
||||||
|
|
||||||
|
ch := sw.scanner.Next()
|
||||||
|
if sw.rawQuotes {
|
||||||
|
result.WriteRune(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
ch = sw.scanner.Next()
|
||||||
|
switch ch {
|
||||||
|
case scanner.EOF:
|
||||||
|
return "", errors.New("unexpected end of statement while looking for matching single-quote")
|
||||||
|
case '\'':
|
||||||
|
if sw.rawQuotes {
|
||||||
|
result.WriteRune(ch)
|
||||||
|
}
|
||||||
|
return result.String(), nil
|
||||||
|
}
|
||||||
|
result.WriteRune(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *shellWord) processDoubleQuote() (string, error) {
|
||||||
|
// All chars up to the next " are taken as-is, even ', except any $ chars
|
||||||
|
// But you can escape " with a \ (or ` if escape token set accordingly)
|
||||||
|
//
|
||||||
|
// From the "sh" man page:
|
||||||
|
// Double Quotes
|
||||||
|
// Enclosing characters within double quotes preserves the literal meaning
|
||||||
|
// of all characters except dollarsign ($), backquote (`), and backslash
|
||||||
|
// (\). The backslash inside double quotes is historically weird, and
|
||||||
|
// serves to quote only the following characters:
|
||||||
|
// $ ` " \ <newline>.
|
||||||
|
// Otherwise it remains literal.
|
||||||
|
|
||||||
|
var result bytes.Buffer
|
||||||
|
|
||||||
|
ch := sw.scanner.Next()
|
||||||
|
if sw.rawQuotes {
|
||||||
|
result.WriteRune(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
switch sw.scanner.Peek() {
|
||||||
|
case scanner.EOF:
|
||||||
|
return "", errors.New("unexpected end of statement while looking for matching double-quote")
|
||||||
|
case '"':
|
||||||
|
ch := sw.scanner.Next()
|
||||||
|
if sw.rawQuotes {
|
||||||
|
result.WriteRune(ch)
|
||||||
|
}
|
||||||
|
return result.String(), nil
|
||||||
|
case '$':
|
||||||
|
value, err := sw.processDollar()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
result.WriteString(value)
|
||||||
|
default:
|
||||||
|
ch := sw.scanner.Next()
|
||||||
|
if ch == sw.escapeToken {
|
||||||
|
if sw.rawEscapes {
|
||||||
|
result.WriteRune(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch sw.scanner.Peek() {
|
||||||
|
case scanner.EOF:
|
||||||
|
// Ignore \ at end of word
|
||||||
|
continue
|
||||||
|
case '"', '$', sw.escapeToken:
|
||||||
|
// These chars can be escaped, all other \'s are left as-is
|
||||||
|
// Note: for now don't do anything special with ` chars.
|
||||||
|
// Not sure what to do with them anyway since we're not going
|
||||||
|
// to execute the text in there (not now anyway).
|
||||||
|
ch = sw.scanner.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
result.WriteRune(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *shellWord) processDollar() (string, error) {
|
||||||
|
sw.scanner.Next()
|
||||||
|
|
||||||
|
// $xxx case
|
||||||
|
if sw.scanner.Peek() != '{' {
|
||||||
|
name := sw.processName()
|
||||||
|
if name == "" {
|
||||||
|
return "$", nil
|
||||||
|
}
|
||||||
|
value, found := sw.getEnv(name)
|
||||||
|
if !found && sw.skipUnsetEnv {
|
||||||
|
return "$" + name, nil
|
||||||
|
}
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sw.scanner.Next()
|
||||||
|
switch sw.scanner.Peek() {
|
||||||
|
case scanner.EOF:
|
||||||
|
return "", errors.New("syntax error: missing '}'")
|
||||||
|
case '{', '}', ':':
|
||||||
|
// Invalid ${{xx}, ${:xx}, ${:}. ${} case
|
||||||
|
return "", errors.New("syntax error: bad substitution")
|
||||||
|
}
|
||||||
|
name := sw.processName()
|
||||||
|
ch := sw.scanner.Next()
|
||||||
|
switch ch {
|
||||||
|
case '}':
|
||||||
|
// Normal ${xx} case
|
||||||
|
value, found := sw.getEnv(name)
|
||||||
|
if !found && sw.skipUnsetEnv {
|
||||||
|
return fmt.Sprintf("${%s}", name), nil
|
||||||
|
}
|
||||||
|
return value, nil
|
||||||
|
case '?':
|
||||||
|
word, _, err := sw.processStopOn('}')
|
||||||
|
if err != nil {
|
||||||
|
if sw.scanner.Peek() == scanner.EOF {
|
||||||
|
return "", errors.New("syntax error: missing '}'")
|
||||||
|
}
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
newValue, found := sw.getEnv(name)
|
||||||
|
if !found {
|
||||||
|
if sw.skipUnsetEnv {
|
||||||
|
return fmt.Sprintf("${%s?%s}", name, word), nil
|
||||||
|
}
|
||||||
|
message := "is not allowed to be unset"
|
||||||
|
if word != "" {
|
||||||
|
message = word
|
||||||
|
}
|
||||||
|
return "", errors.Errorf("%s: %s", name, message)
|
||||||
|
}
|
||||||
|
return newValue, nil
|
||||||
|
case ':':
|
||||||
|
// Special ${xx:...} format processing
|
||||||
|
// Yes it allows for recursive $'s in the ... spot
|
||||||
|
modifier := sw.scanner.Next()
|
||||||
|
|
||||||
|
word, _, err := sw.processStopOn('}')
|
||||||
|
if err != nil {
|
||||||
|
if sw.scanner.Peek() == scanner.EOF {
|
||||||
|
return "", errors.New("syntax error: missing '}'")
|
||||||
|
}
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Grab the current value of the variable in question so we
|
||||||
|
// can use to to determine what to do based on the modifier
|
||||||
|
newValue, found := sw.getEnv(name)
|
||||||
|
|
||||||
|
switch modifier {
|
||||||
|
case '+':
|
||||||
|
if newValue != "" {
|
||||||
|
newValue = word
|
||||||
|
}
|
||||||
|
if !found && sw.skipUnsetEnv {
|
||||||
|
return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil
|
||||||
|
}
|
||||||
|
return newValue, nil
|
||||||
|
|
||||||
|
case '-':
|
||||||
|
if newValue == "" {
|
||||||
|
newValue = word
|
||||||
|
}
|
||||||
|
if !found && sw.skipUnsetEnv {
|
||||||
|
return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return newValue, nil
|
||||||
|
|
||||||
|
case '?':
|
||||||
|
if !found {
|
||||||
|
if sw.skipUnsetEnv {
|
||||||
|
return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil
|
||||||
|
}
|
||||||
|
message := "is not allowed to be unset"
|
||||||
|
if word != "" {
|
||||||
|
message = word
|
||||||
|
}
|
||||||
|
return "", errors.Errorf("%s: %s", name, message)
|
||||||
|
}
|
||||||
|
if newValue == "" {
|
||||||
|
message := "is not allowed to be empty"
|
||||||
|
if word != "" {
|
||||||
|
message = word
|
||||||
|
}
|
||||||
|
return "", errors.Errorf("%s: %s", name, message)
|
||||||
|
}
|
||||||
|
return newValue, nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", errors.Errorf("missing ':' in substitution")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *shellWord) processName() string {
|
||||||
|
// Read in a name (alphanumeric or _)
|
||||||
|
// If it starts with a numeric then just return $#
|
||||||
|
var name bytes.Buffer
|
||||||
|
|
||||||
|
for sw.scanner.Peek() != scanner.EOF {
|
||||||
|
ch := sw.scanner.Peek()
|
||||||
|
if name.Len() == 0 && unicode.IsDigit(ch) {
|
||||||
|
for sw.scanner.Peek() != scanner.EOF && unicode.IsDigit(sw.scanner.Peek()) {
|
||||||
|
// Keep reading until the first non-digit character, or EOF
|
||||||
|
ch = sw.scanner.Next()
|
||||||
|
name.WriteRune(ch)
|
||||||
|
}
|
||||||
|
return name.String()
|
||||||
|
}
|
||||||
|
if name.Len() == 0 && isSpecialParam(ch) {
|
||||||
|
ch = sw.scanner.Next()
|
||||||
|
return string(ch)
|
||||||
|
}
|
||||||
|
if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ch = sw.scanner.Next()
|
||||||
|
name.WriteRune(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
return name.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// isSpecialParam checks if the provided character is a special parameters,
|
||||||
|
// as defined in http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
|
||||||
|
func isSpecialParam(char rune) bool {
|
||||||
|
switch char {
|
||||||
|
case '@', '*', '#', '?', '-', '$', '!', '0':
|
||||||
|
// Special parameters
|
||||||
|
// http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *shellWord) getEnv(name string) (string, bool) {
|
||||||
|
for key, value := range sw.envs {
|
||||||
|
if EqualEnvKeys(name, key) {
|
||||||
|
sw.matches[name] = struct{}{}
|
||||||
|
return value, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
func BuildEnvs(env []string) map[string]string {
|
||||||
|
envs := map[string]string{}
|
||||||
|
|
||||||
|
for _, e := range env {
|
||||||
|
i := strings.Index(e, "=")
|
||||||
|
|
||||||
|
if i < 0 {
|
||||||
|
envs[e] = ""
|
||||||
|
} else {
|
||||||
|
k := e[:i]
|
||||||
|
v := e[i+1:]
|
||||||
|
|
||||||
|
// overwrite value if key already exists
|
||||||
|
envs[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return envs
|
||||||
|
}
|
30
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest
generated
vendored
Normal file
30
vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
hello | hello
|
||||||
|
hello${hi}bye | hellobye
|
||||||
|
ENV hi=hi
|
||||||
|
hello${hi}bye | hellohibye
|
||||||
|
ENV space=abc def
|
||||||
|
hello${space}bye | helloabc,defbye
|
||||||
|
hello"${space}"bye | helloabc defbye
|
||||||
|
hello "${space}"bye | hello,abc defbye
|
||||||
|
ENV leading= ab c
|
||||||
|
hello${leading}def | hello,ab,cdef
|
||||||
|
hello"${leading}" def | hello ab c,def
|
||||||
|
hello"${leading}" | hello ab c
|
||||||
|
hello${leading} | hello,ab,c
|
||||||
|
# next line MUST have 3 trailing spaces, don't erase them!
|
||||||
|
ENV trailing=ab c
|
||||||
|
hello${trailing} | helloab,c
|
||||||
|
hello${trailing}d | helloab,c,d
|
||||||
|
hello"${trailing}"d | helloab c d
|
||||||
|
# next line MUST have 3 trailing spaces, don't erase them!
|
||||||
|
hel"lo${trailing}" | helloab c
|
||||||
|
hello" there " | hello there
|
||||||
|
hello there | hello,there
|
||||||
|
hello\ there | hello there
|
||||||
|
hello" there | error
|
||||||
|
hello\" there | hello",there
|
||||||
|
hello"\\there" | hello\there
|
||||||
|
hello"\there" | hello\there
|
||||||
|
hello'\\there' | hello\\there
|
||||||
|
hello'\there' | hello\there
|
||||||
|
hello'$there' | hello$there
|
|
@ -0,0 +1,3 @@
|
||||||
|
package stack
|
||||||
|
|
||||||
|
//go:generate protoc -I=. -I=../../vendor/ --go_out=. --go_opt=paths=source_relative --go_opt=Mstack.proto=/util/stack stack.proto
|
|
@ -0,0 +1,182 @@
|
||||||
|
package stack
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
io "io"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/containerd/typeurl"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var helpers map[string]struct{}
|
||||||
|
var helpersMu sync.RWMutex
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
typeurl.Register((*Stack)(nil), "github.com/moby/buildkit", "stack.Stack+json")
|
||||||
|
|
||||||
|
helpers = map[string]struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var version string
|
||||||
|
var revision string
|
||||||
|
|
||||||
|
func SetVersionInfo(v, r string) {
|
||||||
|
version = v
|
||||||
|
revision = r
|
||||||
|
}
|
||||||
|
|
||||||
|
func Helper() {
|
||||||
|
var pc [1]uintptr
|
||||||
|
n := runtime.Callers(2, pc[:])
|
||||||
|
if n == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
frames := runtime.CallersFrames(pc[:n])
|
||||||
|
frame, _ := frames.Next()
|
||||||
|
helpersMu.Lock()
|
||||||
|
helpers[frame.Function] = struct{}{}
|
||||||
|
helpersMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func Traces(err error) []*Stack {
|
||||||
|
var st []*Stack
|
||||||
|
|
||||||
|
wrapped, ok := err.(interface {
|
||||||
|
Unwrap() error
|
||||||
|
})
|
||||||
|
if ok {
|
||||||
|
st = Traces(wrapped.Unwrap())
|
||||||
|
}
|
||||||
|
|
||||||
|
if ste, ok := err.(interface {
|
||||||
|
StackTrace() errors.StackTrace
|
||||||
|
}); ok {
|
||||||
|
st = append(st, convertStack(ste.StackTrace()))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ste, ok := err.(interface {
|
||||||
|
StackTrace() *Stack
|
||||||
|
}); ok {
|
||||||
|
st = append(st, ste.StackTrace())
|
||||||
|
}
|
||||||
|
|
||||||
|
return st
|
||||||
|
}
|
||||||
|
|
||||||
|
func Enable(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
Helper()
|
||||||
|
if !hasLocalStackTrace(err) {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func Wrap(err error, s Stack) error {
|
||||||
|
return &withStack{stack: s, error: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasLocalStackTrace(err error) bool {
|
||||||
|
wrapped, ok := err.(interface {
|
||||||
|
Unwrap() error
|
||||||
|
})
|
||||||
|
if ok && hasLocalStackTrace(wrapped.Unwrap()) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ok = err.(interface {
|
||||||
|
StackTrace() errors.StackTrace
|
||||||
|
})
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func Formatter(err error) fmt.Formatter {
|
||||||
|
return &formatter{err}
|
||||||
|
}
|
||||||
|
|
||||||
|
type formatter struct {
|
||||||
|
error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *formatter) Format(s fmt.State, verb rune) {
|
||||||
|
if w.error == nil {
|
||||||
|
fmt.Fprintf(s, "%v", w.error)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch verb {
|
||||||
|
case 'v':
|
||||||
|
if s.Flag('+') {
|
||||||
|
fmt.Fprintf(s, "%s\n", w.Error())
|
||||||
|
for _, stack := range Traces(w.error) {
|
||||||
|
fmt.Fprintf(s, "%d %s %s\n", stack.Pid, stack.Version, strings.Join(stack.Cmdline, " "))
|
||||||
|
for _, f := range stack.Frames {
|
||||||
|
fmt.Fprintf(s, "%s\n\t%s:%d\n", f.Name, f.File, f.Line)
|
||||||
|
}
|
||||||
|
fmt.Fprintln(s)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
case 's':
|
||||||
|
io.WriteString(s, w.Error())
|
||||||
|
case 'q':
|
||||||
|
fmt.Fprintf(s, "%q", w.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertStack(s errors.StackTrace) *Stack {
|
||||||
|
var out Stack
|
||||||
|
helpersMu.RLock()
|
||||||
|
defer helpersMu.RUnlock()
|
||||||
|
for _, f := range s {
|
||||||
|
dt, err := f.MarshalText()
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := strings.SplitN(string(dt), " ", 2)
|
||||||
|
if len(p) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := helpers[p[0]]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
idx := strings.LastIndexByte(p[1], ':')
|
||||||
|
if idx == -1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
line, err := strconv.Atoi(p[1][idx+1:])
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out.Frames = append(out.Frames, &Frame{
|
||||||
|
Name: p[0],
|
||||||
|
File: p[1][:idx],
|
||||||
|
Line: int32(line),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
out.Cmdline = os.Args
|
||||||
|
out.Pid = int32(os.Getpid())
|
||||||
|
out.Version = version
|
||||||
|
out.Revision = revision
|
||||||
|
return &out
|
||||||
|
}
|
||||||
|
|
||||||
|
type withStack struct {
|
||||||
|
stack Stack
|
||||||
|
error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *withStack) Unwrap() error {
|
||||||
|
return e.error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *withStack) StackTrace() *Stack {
|
||||||
|
return &e.stack
|
||||||
|
}
|
|
@ -0,0 +1,172 @@
|
||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// source: stack.proto
|
||||||
|
|
||||||
|
package stack
|
||||||
|
|
||||||
|
import (
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
math "math"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
type Stack struct {
|
||||||
|
Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
|
||||||
|
Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"`
|
||||||
|
Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||||
|
Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
|
||||||
|
Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Stack) Reset() { *m = Stack{} }
|
||||||
|
func (m *Stack) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Stack) ProtoMessage() {}
|
||||||
|
func (*Stack) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_b44c07feb2ca0a5a, []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Stack) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Stack.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Stack) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Stack.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *Stack) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Stack.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Stack) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Stack.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Stack) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Stack.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Stack proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Stack) GetFrames() []*Frame {
|
||||||
|
if m != nil {
|
||||||
|
return m.Frames
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Stack) GetCmdline() []string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Cmdline
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Stack) GetPid() int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Pid
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Stack) GetVersion() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Version
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Stack) GetRevision() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Revision
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type Frame struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
|
||||||
|
File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"`
|
||||||
|
Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Frame) Reset() { *m = Frame{} }
|
||||||
|
func (m *Frame) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Frame) ProtoMessage() {}
|
||||||
|
func (*Frame) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_b44c07feb2ca0a5a, []int{1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Frame) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Frame.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Frame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Frame.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *Frame) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Frame.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Frame) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Frame.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Frame) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Frame.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Frame proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Frame) GetName() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Frame) GetFile() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.File
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Frame) GetLine() int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Line
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Stack)(nil), "stack.Stack")
|
||||||
|
proto.RegisterType((*Frame)(nil), "stack.Frame")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("stack.proto", fileDescriptor_b44c07feb2ca0a5a)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptor_b44c07feb2ca0a5a = []byte{
|
||||||
|
// 185 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8f, 0x3d, 0xce, 0x82, 0x40,
|
||||||
|
0x10, 0x86, 0xb3, 0xdf, 0xb2, 0x7c, 0x3a, 0x58, 0x98, 0xa9, 0x36, 0x56, 0x1b, 0x62, 0x41, 0x45,
|
||||||
|
0xa1, 0x47, 0x30, 0xa1, 0x32, 0x16, 0x78, 0x02, 0x84, 0x35, 0xd9, 0xc8, 0x5f, 0x76, 0x09, 0xd7,
|
||||||
|
0xf0, 0xca, 0x66, 0x06, 0xb4, 0x7b, 0xde, 0x9f, 0xe4, 0x9d, 0x81, 0x24, 0x4c, 0x55, 0xfd, 0xca,
|
||||||
|
0x47, 0x3f, 0x4c, 0x03, 0x2a, 0x16, 0xe9, 0x5b, 0x80, 0xba, 0x13, 0xe1, 0x11, 0xe2, 0xa7, 0xaf,
|
||||||
|
0x3a, 0x1b, 0xb4, 0x30, 0x32, 0x4b, 0x4e, 0xbb, 0x7c, 0xa9, 0x17, 0x64, 0x96, 0x6b, 0x86, 0x1a,
|
||||||
|
0xfe, 0xeb, 0xae, 0x69, 0x5d, 0x6f, 0xf5, 0x9f, 0x91, 0xd9, 0xb6, 0xfc, 0x4a, 0xdc, 0x83, 0x1c,
|
||||||
|
0x5d, 0xa3, 0xa5, 0x11, 0x99, 0x2a, 0x09, 0xa9, 0x3b, 0x5b, 0x1f, 0xdc, 0xd0, 0xeb, 0xc8, 0x08,
|
||||||
|
0xea, 0xae, 0x12, 0x0f, 0xb0, 0xf1, 0x76, 0x76, 0x1c, 0x29, 0x8e, 0x7e, 0x3a, 0xbd, 0x80, 0xe2,
|
||||||
|
0x49, 0x44, 0x88, 0x6e, 0x55, 0x67, 0xb5, 0xe0, 0x02, 0x33, 0x79, 0x85, 0x6b, 0x69, 0x9b, 0x3d,
|
||||||
|
0x62, 0xf2, 0xae, 0x74, 0xcf, 0xb2, 0xcc, 0xfc, 0x88, 0xf9, 0xc9, 0xf3, 0x27, 0x00, 0x00, 0xff,
|
||||||
|
0xff, 0xfd, 0x2c, 0xbb, 0xfb, 0xf3, 0x00, 0x00, 0x00,
|
||||||
|
}
|
|
@ -0,0 +1,17 @@
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package stack;
|
||||||
|
|
||||||
|
message Stack {
|
||||||
|
repeated Frame frames = 1;
|
||||||
|
repeated string cmdline = 2;
|
||||||
|
int32 pid = 3;
|
||||||
|
string version = 4;
|
||||||
|
string revision = 5;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Frame {
|
||||||
|
string Name = 1;
|
||||||
|
string File = 2;
|
||||||
|
int32 Line = 3;
|
||||||
|
}
|
|
@ -9,3 +9,9 @@ test:
|
||||||
test-conformance:
|
test-conformance:
|
||||||
go test -v -tags conformance -timeout 45m ./dockerclient
|
go test -v -tags conformance -timeout 45m ./dockerclient
|
||||||
.PHONY: test-conformance
|
.PHONY: test-conformance
|
||||||
|
|
||||||
|
.PHONY: vendor
|
||||||
|
vendor:
|
||||||
|
GO111MODULE=on go mod tidy
|
||||||
|
GO111MODULE=on go mod vendor
|
||||||
|
GO111MODULE=on go mod verify
|
||||||
|
|
|
@ -13,6 +13,7 @@ import (
|
||||||
|
|
||||||
docker "github.com/fsouza/go-dockerclient"
|
docker "github.com/fsouza/go-dockerclient"
|
||||||
|
|
||||||
|
buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser"
|
||||||
"github.com/openshift/imagebuilder/dockerfile/command"
|
"github.com/openshift/imagebuilder/dockerfile/command"
|
||||||
"github.com/openshift/imagebuilder/dockerfile/parser"
|
"github.com/openshift/imagebuilder/dockerfile/parser"
|
||||||
)
|
)
|
||||||
|
@ -32,6 +33,17 @@ type Copy struct {
|
||||||
Chown string
|
Chown string
|
||||||
Chmod string
|
Chmod string
|
||||||
Checksum string
|
Checksum string
|
||||||
|
// Additional files which need to be created by executor for this
|
||||||
|
// instruction.
|
||||||
|
Files []File
|
||||||
|
}
|
||||||
|
|
||||||
|
// File defines if any additional file needs to be created
|
||||||
|
// by the executor instruction so that specified command
|
||||||
|
// can execute/copy the created file inside the build container.
|
||||||
|
type File struct {
|
||||||
|
Name string // Name of the new file.
|
||||||
|
Data string // Content of the file.
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run defines a run operation required in the container.
|
// Run defines a run operation required in the container.
|
||||||
|
@ -42,6 +54,9 @@ type Run struct {
|
||||||
Mounts []string
|
Mounts []string
|
||||||
// Network specifies the network mode to run the container with
|
// Network specifies the network mode to run the container with
|
||||||
Network string
|
Network string
|
||||||
|
// Additional files which need to be created by executor for this
|
||||||
|
// instruction.
|
||||||
|
Files []File
|
||||||
}
|
}
|
||||||
|
|
||||||
type Executor interface {
|
type Executor interface {
|
||||||
|
@ -395,7 +410,7 @@ func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error {
|
||||||
if !ok {
|
if !ok {
|
||||||
return exec.UnrecognizedInstruction(step)
|
return exec.UnrecognizedInstruction(step)
|
||||||
}
|
}
|
||||||
if err := fn(b, step.Args, step.Attrs, step.Flags, step.Original); err != nil {
|
if err := fn(b, step.Args, step.Attrs, step.Flags, step.Original, step.Heredocs); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -575,7 +590,7 @@ func SplitBy(node *parser.Node, value string) []*parser.Node {
|
||||||
}
|
}
|
||||||
|
|
||||||
// StepFunc is invoked with the result of a resolved step.
|
// StepFunc is invoked with the result of a resolved step.
|
||||||
type StepFunc func(*Builder, []string, map[string]bool, []string, string) error
|
type StepFunc func(*Builder, []string, map[string]bool, []string, string, []buildkitparser.Heredoc) error
|
||||||
|
|
||||||
var evaluateTable = map[string]StepFunc{
|
var evaluateTable = map[string]StepFunc{
|
||||||
command.Env: env,
|
command.Env: env,
|
||||||
|
|
|
@ -22,6 +22,9 @@ import (
|
||||||
"github.com/containers/storage/pkg/regexp"
|
"github.com/containers/storage/pkg/regexp"
|
||||||
"github.com/openshift/imagebuilder/signal"
|
"github.com/openshift/imagebuilder/signal"
|
||||||
"github.com/openshift/imagebuilder/strslice"
|
"github.com/openshift/imagebuilder/strslice"
|
||||||
|
|
||||||
|
buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser"
|
||||||
|
buildkitshell "github.com/moby/buildkit/frontend/dockerfile/shell"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -53,7 +56,7 @@ func init() {
|
||||||
//
|
//
|
||||||
// Sets the environment variable foo to bar, also makes interpolation
|
// Sets the environment variable foo to bar, also makes interpolation
|
||||||
// in the dockerfile available from the next statement on via ${foo}.
|
// in the dockerfile available from the next statement on via ${foo}.
|
||||||
func env(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func env(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
return errAtLeastOneArgument("ENV")
|
return errAtLeastOneArgument("ENV")
|
||||||
}
|
}
|
||||||
|
@ -94,7 +97,7 @@ func env(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
|
||||||
// MAINTAINER some text <maybe@an.email.address>
|
// MAINTAINER some text <maybe@an.email.address>
|
||||||
//
|
//
|
||||||
// Sets the maintainer metadata.
|
// Sets the maintainer metadata.
|
||||||
func maintainer(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func maintainer(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) != 1 {
|
if len(args) != 1 {
|
||||||
return errExactlyOneArgument("MAINTAINER")
|
return errExactlyOneArgument("MAINTAINER")
|
||||||
}
|
}
|
||||||
|
@ -105,7 +108,7 @@ func maintainer(b *Builder, args []string, attributes map[string]bool, flagArgs
|
||||||
// LABEL some json data describing the image
|
// LABEL some json data describing the image
|
||||||
//
|
//
|
||||||
// Sets the Label variable foo to bar,
|
// Sets the Label variable foo to bar,
|
||||||
func label(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func label(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
return errAtLeastOneArgument("LABEL")
|
return errAtLeastOneArgument("LABEL")
|
||||||
}
|
}
|
||||||
|
@ -127,11 +130,37 @@ func label(b *Builder, args []string, attributes map[string]bool, flagArgs []str
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func processHereDocs(originalInstruction string, heredocs []buildkitparser.Heredoc, args []string) ([]File, error) {
|
||||||
|
var files []File
|
||||||
|
for _, heredoc := range heredocs {
|
||||||
|
var err error
|
||||||
|
content := heredoc.Content
|
||||||
|
if heredoc.Chomp {
|
||||||
|
content = buildkitparser.ChompHeredocContent(content)
|
||||||
|
}
|
||||||
|
if heredoc.Expand {
|
||||||
|
shlex := buildkitshell.NewLex('\\')
|
||||||
|
shlex.RawQuotes = true
|
||||||
|
shlex.RawEscapes = true
|
||||||
|
content, err = shlex.ProcessWord(content, args)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file := File{
|
||||||
|
Data: content,
|
||||||
|
Name: heredoc.Name,
|
||||||
|
}
|
||||||
|
files = append(files, file)
|
||||||
|
}
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
// ADD foo /path
|
// ADD foo /path
|
||||||
//
|
//
|
||||||
// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
|
// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
|
||||||
// exist here. If you do not wish to have this automatic handling, use COPY.
|
// exist here. If you do not wish to have this automatic handling, use COPY.
|
||||||
func add(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func add(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) < 2 {
|
if len(args) < 2 {
|
||||||
return errAtLeastTwoArgument("ADD")
|
return errAtLeastTwoArgument("ADD")
|
||||||
}
|
}
|
||||||
|
@ -167,20 +196,25 @@ func add(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
|
||||||
return fmt.Errorf("ADD only supports the --chmod=<permissions>, --chown=<uid:gid>, and --checksum=<checksum> flags")
|
return fmt.Errorf("ADD only supports the --chmod=<permissions>, --chown=<uid:gid>, and --checksum=<checksum> flags")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
files, err := processHereDocs(original, heredocs, userArgs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
b.PendingCopies = append(b.PendingCopies, Copy{
|
b.PendingCopies = append(b.PendingCopies, Copy{
|
||||||
Src: args[0:last],
|
Src: args[0:last],
|
||||||
Dest: dest,
|
Dest: dest,
|
||||||
Download: true,
|
Download: true,
|
||||||
Chown: chown,
|
Chown: chown,
|
||||||
Chmod: chmod,
|
Chmod: chmod,
|
||||||
Checksum: checksum})
|
Checksum: checksum,
|
||||||
|
Files: files})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// COPY foo /path
|
// COPY foo /path
|
||||||
//
|
//
|
||||||
// Same as 'ADD' but without the tar and remote url handling.
|
// Same as 'ADD' but without the tar and remote url handling.
|
||||||
func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) < 2 {
|
if len(args) < 2 {
|
||||||
return errAtLeastTwoArgument("COPY")
|
return errAtLeastTwoArgument("COPY")
|
||||||
}
|
}
|
||||||
|
@ -210,14 +244,18 @@ func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArg
|
||||||
return fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
|
return fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
b.PendingCopies = append(b.PendingCopies, Copy{From: from, Src: args[0:last], Dest: dest, Download: false, Chown: chown, Chmod: chmod})
|
files, err := processHereDocs(original, heredocs, userArgs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
b.PendingCopies = append(b.PendingCopies, Copy{From: from, Src: args[0:last], Dest: dest, Download: false, Chown: chown, Chmod: chmod, Files: files})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// FROM imagename
|
// FROM imagename
|
||||||
//
|
//
|
||||||
// This sets the image the dockerfile will build on top of.
|
// This sets the image the dockerfile will build on top of.
|
||||||
func from(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func from(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
switch {
|
switch {
|
||||||
case len(args) == 1:
|
case len(args) == 1:
|
||||||
case len(args) == 3 && len(args[0]) > 0 && strings.EqualFold(args[1], "as") && len(args[2]) > 0:
|
case len(args) == 3 && len(args[0]) > 0 && strings.EqualFold(args[1], "as") && len(args[2]) > 0:
|
||||||
|
@ -282,7 +320,7 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri
|
||||||
// evaluator.go and comments around dispatch() in the same file explain the
|
// evaluator.go and comments around dispatch() in the same file explain the
|
||||||
// special cases. search for 'OnBuild' in internals.go for additional special
|
// special cases. search for 'OnBuild' in internals.go for additional special
|
||||||
// cases.
|
// cases.
|
||||||
func onbuild(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func onbuild(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
return errAtLeastOneArgument("ONBUILD")
|
return errAtLeastOneArgument("ONBUILD")
|
||||||
}
|
}
|
||||||
|
@ -304,7 +342,7 @@ func onbuild(b *Builder, args []string, attributes map[string]bool, flagArgs []s
|
||||||
// WORKDIR /tmp
|
// WORKDIR /tmp
|
||||||
//
|
//
|
||||||
// Set the working directory for future RUN/CMD/etc statements.
|
// Set the working directory for future RUN/CMD/etc statements.
|
||||||
func workdir(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func workdir(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) != 1 {
|
if len(args) != 1 {
|
||||||
return errExactlyOneArgument("WORKDIR")
|
return errExactlyOneArgument("WORKDIR")
|
||||||
}
|
}
|
||||||
|
@ -331,7 +369,7 @@ func workdir(b *Builder, args []string, attributes map[string]bool, flagArgs []s
|
||||||
// RUN echo hi # sh -c echo hi (Linux)
|
// RUN echo hi # sh -c echo hi (Linux)
|
||||||
// RUN echo hi # cmd /S /C echo hi (Windows)
|
// RUN echo hi # cmd /S /C echo hi (Windows)
|
||||||
// RUN [ "echo", "hi" ] # echo hi
|
// RUN [ "echo", "hi" ] # echo hi
|
||||||
func run(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func run(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if b.RunConfig.Image == "" {
|
if b.RunConfig.Image == "" {
|
||||||
return fmt.Errorf("Please provide a source image with `from` prior to run")
|
return fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||||
}
|
}
|
||||||
|
@ -363,10 +401,16 @@ func run(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
files, err := processHereDocs(original, heredocs, userArgs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
run := Run{
|
run := Run{
|
||||||
Args: args,
|
Args: args,
|
||||||
Mounts: mounts,
|
Mounts: mounts,
|
||||||
Network: network,
|
Network: network,
|
||||||
|
Files: files,
|
||||||
}
|
}
|
||||||
|
|
||||||
if !attributes["json"] {
|
if !attributes["json"] {
|
||||||
|
@ -380,7 +424,7 @@ func run(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
|
||||||
//
|
//
|
||||||
// Set the default command to run in the container (which may be empty).
|
// Set the default command to run in the container (which may be empty).
|
||||||
// Argument handling is the same as RUN.
|
// Argument handling is the same as RUN.
|
||||||
func cmd(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func cmd(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
cmdSlice := handleJSONArgs(args, attributes)
|
cmdSlice := handleJSONArgs(args, attributes)
|
||||||
|
|
||||||
if !attributes["json"] {
|
if !attributes["json"] {
|
||||||
|
@ -405,7 +449,7 @@ func cmd(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
|
||||||
//
|
//
|
||||||
// Handles command processing similar to CMD and RUN, only b.RunConfig.Entrypoint
|
// Handles command processing similar to CMD and RUN, only b.RunConfig.Entrypoint
|
||||||
// is initialized at NewBuilder time instead of through argument parsing.
|
// is initialized at NewBuilder time instead of through argument parsing.
|
||||||
func entrypoint(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func entrypoint(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
parsed := handleJSONArgs(args, attributes)
|
parsed := handleJSONArgs(args, attributes)
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
|
@ -436,7 +480,7 @@ func entrypoint(b *Builder, args []string, attributes map[string]bool, flagArgs
|
||||||
//
|
//
|
||||||
// Expose ports for links and port mappings. This all ends up in
|
// Expose ports for links and port mappings. This all ends up in
|
||||||
// b.RunConfig.ExposedPorts for runconfig.
|
// b.RunConfig.ExposedPorts for runconfig.
|
||||||
func expose(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func expose(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
return errAtLeastOneArgument("EXPOSE")
|
return errAtLeastOneArgument("EXPOSE")
|
||||||
}
|
}
|
||||||
|
@ -463,7 +507,7 @@ func expose(b *Builder, args []string, attributes map[string]bool, flagArgs []st
|
||||||
//
|
//
|
||||||
// Set the user to 'foo' for future commands and when running the
|
// Set the user to 'foo' for future commands and when running the
|
||||||
// ENTRYPOINT/CMD at container run time.
|
// ENTRYPOINT/CMD at container run time.
|
||||||
func user(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func user(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) != 1 {
|
if len(args) != 1 {
|
||||||
return errExactlyOneArgument("USER")
|
return errExactlyOneArgument("USER")
|
||||||
}
|
}
|
||||||
|
@ -475,7 +519,7 @@ func user(b *Builder, args []string, attributes map[string]bool, flagArgs []stri
|
||||||
// VOLUME /foo
|
// VOLUME /foo
|
||||||
//
|
//
|
||||||
// Expose the volume /foo for use. Will also accept the JSON array form.
|
// Expose the volume /foo for use. Will also accept the JSON array form.
|
||||||
func volume(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func volume(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
return errAtLeastOneArgument("VOLUME")
|
return errAtLeastOneArgument("VOLUME")
|
||||||
}
|
}
|
||||||
|
@ -497,7 +541,7 @@ func volume(b *Builder, args []string, attributes map[string]bool, flagArgs []st
|
||||||
// STOPSIGNAL signal
|
// STOPSIGNAL signal
|
||||||
//
|
//
|
||||||
// Set the signal that will be used to kill the container.
|
// Set the signal that will be used to kill the container.
|
||||||
func stopSignal(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func stopSignal(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) != 1 {
|
if len(args) != 1 {
|
||||||
return errExactlyOneArgument("STOPSIGNAL")
|
return errExactlyOneArgument("STOPSIGNAL")
|
||||||
}
|
}
|
||||||
|
@ -515,7 +559,7 @@ func stopSignal(b *Builder, args []string, attributes map[string]bool, flagArgs
|
||||||
//
|
//
|
||||||
// Set the default healthcheck command to run in the container (which may be empty).
|
// Set the default healthcheck command to run in the container (which may be empty).
|
||||||
// Argument handling is the same as RUN.
|
// Argument handling is the same as RUN.
|
||||||
func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
return errAtLeastOneArgument("HEALTHCHECK")
|
return errAtLeastOneArgument("HEALTHCHECK")
|
||||||
}
|
}
|
||||||
|
@ -608,7 +652,7 @@ var targetArgs = []string{"TARGETOS", "TARGETARCH", "TARGETVARIANT"}
|
||||||
// Adds the variable foo to the trusted list of variables that can be passed
|
// Adds the variable foo to the trusted list of variables that can be passed
|
||||||
// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'.
|
// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'.
|
||||||
// Dockerfile author may optionally set a default value of this variable.
|
// Dockerfile author may optionally set a default value of this variable.
|
||||||
func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
var (
|
var (
|
||||||
name string
|
name string
|
||||||
value string
|
value string
|
||||||
|
@ -674,7 +718,7 @@ func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []strin
|
||||||
// SHELL powershell -command
|
// SHELL powershell -command
|
||||||
//
|
//
|
||||||
// Set the non-default shell to use.
|
// Set the non-default shell to use.
|
||||||
func shell(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
|
func shell(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string, heredocs []buildkitparser.Heredoc) error {
|
||||||
shellSlice := handleJSONArgs(args, attributes)
|
shellSlice := handleJSONArgs(args, attributes)
|
||||||
switch {
|
switch {
|
||||||
case len(shellSlice) == 0:
|
case len(shellSlice) == 0:
|
||||||
|
|
|
@ -777,6 +777,9 @@ func (e *ClientExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error
|
||||||
// the user command into a shell and perform those operations before. Since RUN
|
// the user command into a shell and perform those operations before. Since RUN
|
||||||
// requires /bin/sh, we can use both 'cd' and 'export'.
|
// requires /bin/sh, we can use both 'cd' and 'export'.
|
||||||
func (e *ClientExecutor) Run(run imagebuilder.Run, config docker.Config) error {
|
func (e *ClientExecutor) Run(run imagebuilder.Run, config docker.Config) error {
|
||||||
|
if len(run.Files) > 0 {
|
||||||
|
return fmt.Errorf("Heredoc syntax is not supported")
|
||||||
|
}
|
||||||
if len(run.Mounts) > 0 {
|
if len(run.Mounts) > 0 {
|
||||||
return fmt.Errorf("RUN --mount not supported")
|
return fmt.Errorf("RUN --mount not supported")
|
||||||
}
|
}
|
||||||
|
@ -879,6 +882,9 @@ func (e *ClientExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) er
|
||||||
if copy.Checksum != "" {
|
if copy.Checksum != "" {
|
||||||
return fmt.Errorf("ADD --checksum not supported")
|
return fmt.Errorf("ADD --checksum not supported")
|
||||||
}
|
}
|
||||||
|
if len(copy.Files) > 0 {
|
||||||
|
return fmt.Errorf("Heredoc syntax is not supported")
|
||||||
|
}
|
||||||
e.Volumes.Invalidate(copy.Dest)
|
e.Volumes.Invalidate(copy.Dest)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -15,6 +15,8 @@ import (
|
||||||
|
|
||||||
sRegexp "github.com/containers/storage/pkg/regexp"
|
sRegexp "github.com/containers/storage/pkg/regexp"
|
||||||
"github.com/containers/storage/pkg/system"
|
"github.com/containers/storage/pkg/system"
|
||||||
|
buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser"
|
||||||
|
buildkitshell "github.com/moby/buildkit/frontend/dockerfile/shell"
|
||||||
"github.com/openshift/imagebuilder/dockerfile/command"
|
"github.com/openshift/imagebuilder/dockerfile/command"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -30,14 +32,15 @@ import (
|
||||||
// but lucky for us the Dockerfile isn't very complicated. This structure
|
// but lucky for us the Dockerfile isn't very complicated. This structure
|
||||||
// works a little more effectively than a "proper" parse tree for our needs.
|
// works a little more effectively than a "proper" parse tree for our needs.
|
||||||
type Node struct {
|
type Node struct {
|
||||||
Value string // actual content
|
Value string // actual content
|
||||||
Next *Node // the next item in the current sexp
|
Next *Node // the next item in the current sexp
|
||||||
Children []*Node // the children of this sexp
|
Children []*Node // the children of this sexp
|
||||||
Attributes map[string]bool // special attributes for this node
|
Heredocs []buildkitparser.Heredoc // extra heredoc content attachments
|
||||||
Original string // original line used before parsing
|
Attributes map[string]bool // special attributes for this node
|
||||||
Flags []string // only top Node should have this set
|
Original string // original line used before parsing
|
||||||
StartLine int // the line in the original dockerfile where the node begins
|
Flags []string // only top Node should have this set
|
||||||
EndLine int // the line in the original dockerfile where the node ends
|
StartLine int // the line in the original dockerfile where the node begins
|
||||||
|
EndLine int // the line in the original dockerfile where the node ends
|
||||||
}
|
}
|
||||||
|
|
||||||
// Dump dumps the AST defined by `node` as a list of sexps.
|
// Dump dumps the AST defined by `node` as a list of sexps.
|
||||||
|
@ -53,7 +56,11 @@ func (node *Node) Dump() string {
|
||||||
for _, n := range node.Children {
|
for _, n := range node.Children {
|
||||||
str += "(" + n.Dump() + ")\n"
|
str += "(" + n.Dump() + ")\n"
|
||||||
}
|
}
|
||||||
|
if len(node.Heredocs) > 0 {
|
||||||
|
for _, doc := range node.Heredocs {
|
||||||
|
str += "(" + doc.Name + "-" + doc.Content + "-" + strconv.Itoa(int(doc.FileDescriptor)) + "-" + strconv.FormatBool(doc.Expand) + "-" + strconv.FormatBool(doc.Chomp) + ")\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
for n := node.Next; n != nil; n = n.Next {
|
for n := node.Next; n != nil; n = n.Next {
|
||||||
if len(n.Children) > 0 {
|
if len(n.Children) > 0 {
|
||||||
str += " " + n.Dump()
|
str += " " + n.Dump()
|
||||||
|
@ -70,6 +77,24 @@ func (node *Node) lines(start, end int) {
|
||||||
node.EndLine = end
|
node.EndLine = end
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (node *Node) canContainHeredoc() bool {
|
||||||
|
// check for compound commands, like ONBUILD
|
||||||
|
if ok := heredocCompoundDirectives[strings.ToLower(node.Value)]; ok {
|
||||||
|
if node.Next != nil && len(node.Next.Children) > 0 {
|
||||||
|
node = node.Next.Children[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok := heredocDirectives[strings.ToLower(node.Value)]; !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if isJSON := node.Attributes["json"]; isJSON {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// AddChild adds a new child node, and updates line information
|
// AddChild adds a new child node, and updates line information
|
||||||
func (node *Node) AddChild(child *Node, startLine, endLine int) {
|
func (node *Node) AddChild(child *Node, startLine, endLine int) {
|
||||||
child.lines(startLine, endLine)
|
child.lines(startLine, endLine)
|
||||||
|
@ -94,6 +119,20 @@ const DefaultEscapeToken = '\\'
|
||||||
// defaultPlatformToken is the platform assumed for the build if not explicitly provided
|
// defaultPlatformToken is the platform assumed for the build if not explicitly provided
|
||||||
var defaultPlatformToken = runtime.GOOS
|
var defaultPlatformToken = runtime.GOOS
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Directives allowed to contain heredocs
|
||||||
|
heredocDirectives = map[string]bool{
|
||||||
|
command.Add: true,
|
||||||
|
command.Copy: true,
|
||||||
|
command.Run: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Directives allowed to contain directives containing heredocs
|
||||||
|
heredocCompoundDirectives = map[string]bool{
|
||||||
|
command.Onbuild: true,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
// Directive is the structure used during a build run to hold the state of
|
// Directive is the structure used during a build run to hold the state of
|
||||||
// parsing directives.
|
// parsing directives.
|
||||||
type Directive struct {
|
type Directive struct {
|
||||||
|
@ -313,6 +352,39 @@ func Parse(rwc io.Reader) (*Result, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if child.canContainHeredoc() {
|
||||||
|
heredocs, err := heredocsFromLine(line)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, heredoc := range heredocs {
|
||||||
|
terminator := []byte(heredoc.Name)
|
||||||
|
terminated := false
|
||||||
|
for scanner.Scan() {
|
||||||
|
bytesRead := scanner.Bytes()
|
||||||
|
currentLine++
|
||||||
|
|
||||||
|
possibleTerminator := trimNewline(bytesRead)
|
||||||
|
if heredoc.Chomp {
|
||||||
|
possibleTerminator = trimLeadingTabs(possibleTerminator)
|
||||||
|
}
|
||||||
|
if bytes.Equal(possibleTerminator, terminator) {
|
||||||
|
terminated = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
heredoc.Content += "\n"
|
||||||
|
heredoc.Content += string(bytesRead)
|
||||||
|
}
|
||||||
|
if !terminated {
|
||||||
|
return nil, fmt.Errorf("%s: unterminated heredoc", heredoc.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
child.Heredocs = append(child.Heredocs, heredoc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
root.AddChild(child, startLine, currentLine)
|
root.AddChild(child, startLine, currentLine)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -331,6 +403,26 @@ func Parse(rwc io.Reader) (*Result, error) {
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func heredocsFromLine(line string) ([]buildkitparser.Heredoc, error) {
|
||||||
|
shlex := buildkitshell.NewLex('\\')
|
||||||
|
shlex.RawQuotes = true
|
||||||
|
shlex.RawEscapes = true
|
||||||
|
shlex.SkipUnsetEnv = true
|
||||||
|
words, _ := shlex.ProcessWords(line, []string{})
|
||||||
|
|
||||||
|
var docs []buildkitparser.Heredoc
|
||||||
|
for _, word := range words {
|
||||||
|
heredoc, err := buildkitparser.ParseHeredoc(word)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if heredoc != nil {
|
||||||
|
docs = append(docs, *heredoc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return docs, nil
|
||||||
|
}
|
||||||
|
|
||||||
func trimComments(src []byte) []byte {
|
func trimComments(src []byte) []byte {
|
||||||
return tokenComment.ReplaceAll(src, []byte{})
|
return tokenComment.ReplaceAll(src, []byte{})
|
||||||
}
|
}
|
||||||
|
@ -339,6 +431,16 @@ func trimWhitespace(src []byte) []byte {
|
||||||
return bytes.TrimLeftFunc(src, unicode.IsSpace)
|
return bytes.TrimLeftFunc(src, unicode.IsSpace)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func trimLeadingWhitespace(src []byte) []byte {
|
||||||
|
return bytes.TrimLeftFunc(src, unicode.IsSpace)
|
||||||
|
}
|
||||||
|
func trimLeadingTabs(src []byte) []byte {
|
||||||
|
return bytes.TrimLeft(src, "\t")
|
||||||
|
}
|
||||||
|
func trimNewline(src []byte) []byte {
|
||||||
|
return bytes.TrimRight(src, "\r\n")
|
||||||
|
}
|
||||||
|
|
||||||
func isEmptyContinuationLine(line []byte) bool {
|
func isEmptyContinuationLine(line []byte) bool {
|
||||||
return len(trimComments(trimWhitespace(line))) == 0
|
return len(trimComments(trimWhitespace(line))) == 0
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,6 +7,7 @@ import (
|
||||||
|
|
||||||
"github.com/openshift/imagebuilder/dockerfile/command"
|
"github.com/openshift/imagebuilder/dockerfile/command"
|
||||||
"github.com/openshift/imagebuilder/dockerfile/parser"
|
"github.com/openshift/imagebuilder/dockerfile/parser"
|
||||||
|
buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ParseDockerfile parses the provided stream as a canonical Dockerfile
|
// ParseDockerfile parses the provided stream as a canonical Dockerfile
|
||||||
|
@ -34,14 +35,10 @@ var replaceEnvAllowed = map[string]bool{
|
||||||
|
|
||||||
// Certain commands are allowed to have their args split into more
|
// Certain commands are allowed to have their args split into more
|
||||||
// words after env var replacements. Meaning:
|
// words after env var replacements. Meaning:
|
||||||
//
|
// ENV foo="123 456"
|
||||||
// ENV foo="123 456"
|
// EXPOSE $foo
|
||||||
// EXPOSE $foo
|
|
||||||
//
|
|
||||||
// should result in the same thing as:
|
// should result in the same thing as:
|
||||||
//
|
// EXPOSE 123 456
|
||||||
// EXPOSE 123 456
|
|
||||||
//
|
|
||||||
// and not treat "123 456" as a single word.
|
// and not treat "123 456" as a single word.
|
||||||
// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing.
|
// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing.
|
||||||
// Quotes will cause it to still be treated as single word.
|
// Quotes will cause it to still be treated as single word.
|
||||||
|
@ -59,6 +56,7 @@ type Step struct {
|
||||||
Flags []string
|
Flags []string
|
||||||
Attrs map[string]bool
|
Attrs map[string]bool
|
||||||
Message string
|
Message string
|
||||||
|
Heredocs []buildkitparser.Heredoc
|
||||||
Original string
|
Original string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -78,6 +76,7 @@ type Step struct {
|
||||||
// deal with that, at least until it becomes more of a general concern with new
|
// deal with that, at least until it becomes more of a general concern with new
|
||||||
// features.
|
// features.
|
||||||
func (b *Step) Resolve(ast *parser.Node) error {
|
func (b *Step) Resolve(ast *parser.Node) error {
|
||||||
|
b.Heredocs = ast.Heredocs
|
||||||
cmd := ast.Value
|
cmd := ast.Value
|
||||||
upperCasedCmd := strings.ToUpper(cmd)
|
upperCasedCmd := strings.ToUpper(cmd)
|
||||||
|
|
||||||
|
|
|
@ -76,6 +76,9 @@ github.com/containerd/log
|
||||||
## explicit; go 1.19
|
## explicit; go 1.19
|
||||||
github.com/containerd/stargz-snapshotter/estargz
|
github.com/containerd/stargz-snapshotter/estargz
|
||||||
github.com/containerd/stargz-snapshotter/estargz/errorutil
|
github.com/containerd/stargz-snapshotter/estargz/errorutil
|
||||||
|
# github.com/containerd/typeurl v1.0.2
|
||||||
|
## explicit; go 1.13
|
||||||
|
github.com/containerd/typeurl
|
||||||
# github.com/containernetworking/cni v1.1.2
|
# github.com/containernetworking/cni v1.1.2
|
||||||
## explicit; go 1.14
|
## explicit; go 1.14
|
||||||
github.com/containernetworking/cni/libcni
|
github.com/containernetworking/cni/libcni
|
||||||
|
@ -394,6 +397,8 @@ github.com/go-openapi/validate
|
||||||
# github.com/gogo/protobuf v1.3.2
|
# github.com/gogo/protobuf v1.3.2
|
||||||
## explicit; go 1.15
|
## explicit; go 1.15
|
||||||
github.com/gogo/protobuf/proto
|
github.com/gogo/protobuf/proto
|
||||||
|
github.com/gogo/protobuf/sortkeys
|
||||||
|
github.com/gogo/protobuf/types
|
||||||
# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
|
# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
|
||||||
## explicit
|
## explicit
|
||||||
github.com/golang/groupcache/lru
|
github.com/golang/groupcache/lru
|
||||||
|
@ -493,6 +498,12 @@ github.com/mistifyio/go-zfs/v3
|
||||||
# github.com/mitchellh/mapstructure v1.5.0
|
# github.com/mitchellh/mapstructure v1.5.0
|
||||||
## explicit; go 1.14
|
## explicit; go 1.14
|
||||||
github.com/mitchellh/mapstructure
|
github.com/mitchellh/mapstructure
|
||||||
|
# github.com/moby/buildkit v0.10.6
|
||||||
|
## explicit; go 1.17
|
||||||
|
github.com/moby/buildkit/frontend/dockerfile/command
|
||||||
|
github.com/moby/buildkit/frontend/dockerfile/parser
|
||||||
|
github.com/moby/buildkit/frontend/dockerfile/shell
|
||||||
|
github.com/moby/buildkit/util/stack
|
||||||
# github.com/moby/patternmatcher v0.5.0
|
# github.com/moby/patternmatcher v0.5.0
|
||||||
## explicit; go 1.19
|
## explicit; go 1.19
|
||||||
github.com/moby/patternmatcher
|
github.com/moby/patternmatcher
|
||||||
|
@ -590,7 +601,7 @@ github.com/opencontainers/selinux/go-selinux
|
||||||
github.com/opencontainers/selinux/go-selinux/label
|
github.com/opencontainers/selinux/go-selinux/label
|
||||||
github.com/opencontainers/selinux/pkg/pwalk
|
github.com/opencontainers/selinux/pkg/pwalk
|
||||||
github.com/opencontainers/selinux/pkg/pwalkdir
|
github.com/opencontainers/selinux/pkg/pwalkdir
|
||||||
# github.com/openshift/imagebuilder v1.2.6-0.20231108213319-b27edc077bbc
|
# github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722
|
||||||
## explicit; go 1.19
|
## explicit; go 1.19
|
||||||
github.com/openshift/imagebuilder
|
github.com/openshift/imagebuilder
|
||||||
github.com/openshift/imagebuilder/dockerclient
|
github.com/openshift/imagebuilder/dockerclient
|
||||||
|
|
Loading…
Reference in New Issue