Add support for shell
Also vendor in the latest imagebuilder code and all the packages that come with it.

Note: imagebuilder.NewBuilderForReader has been removed from imagebuilder, so I had to split the function up into two different calls.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Closes: #517
Approved by: rhatdan
This commit is contained in:
parent ae781bd476
commit 5ce80091ba
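For orientation before the hunks below: imagebuilder no longer exports NewBuilderForReader, so the build path now creates the Builder and parses the Dockerfile in two separate calls. The following is a minimal, hedged sketch of that replacement pattern; the function name splitCalls and the import paths are illustrative (the parser path is assumed from the vendored tree), while NewBuilder and ParseDockerfile are the calls that appear in the hunks below.

```go
package buildsketch

import (
	"io"

	"github.com/docker/docker/builder/dockerfile/parser" // path assumed from the vendored tree
	"github.com/openshift/imagebuilder"
	"github.com/pkg/errors"
)

// splitCalls shows the two calls that replace the removed
// imagebuilder.NewBuilderForReader: one creates the Builder from the build
// args, the other parses the Dockerfile into a *parser.Node.
func splitCalls(r io.Reader, args map[string]string) (*imagebuilder.Builder, *parser.Node, error) {
	builder := imagebuilder.NewBuilder(args)     // Builder half of the old call
	node, err := imagebuilder.ParseDockerfile(r) // parsing half of the old call
	if err != nil {
		return nil, nil, errors.Wrapf(err, "error parsing dockerfile")
	}
	return builder, node, nil
}
```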
@@ -59,6 +59,10 @@ var (
			Name:  "port, p",
			Usage: "add `port` to expose when running containers based on image",
		},
		cli.StringFlag{
			Name:  "shell",
			Usage: "add `shell` to run in containers",
		},
		cli.StringFlag{
			Name:  "user, u",
			Usage: "set default `user` to run inside containers based on image",
@@ -100,6 +104,14 @@ func updateConfig(builder *buildah.Builder, c *cli.Context) {
	if c.IsSet("user") {
		builder.SetUser(c.String("user"))
	}
	if c.IsSet("shell") {
		shellSpec, err := shellwords.Parse(c.String("shell"))
		if err != nil {
			logrus.Errorf("error parsing --shell %q: %v", c.String("shell"), err)
		} else {
			builder.SetShell(shellSpec)
		}
	}
	if c.IsSet("port") || c.IsSet("p") {
		for _, portSpec := range c.StringSlice("port") {
			builder.SetPort(portSpec)
config.go: 15 changes
@@ -343,6 +343,21 @@ func (b *Builder) SetWorkDir(there string) {
	b.Docker.Config.WorkingDir = there
}

// Shell returns the default shell for running commands in the
// container, or in a container built using an image built from this container.
func (b *Builder) Shell() []string {
	return b.Docker.Config.Shell
}

// SetShell sets the default shell for running
// commands in the container, or in a container built using an image built from
// this container.
// Note: this setting is not present in the OCIv1 image format, so it is
// discarded when writing images using OCIv1 formats.
func (b *Builder) SetShell(shell []string) {
	b.Docker.Config.Shell = shell
}

// Env returns a list of key-value pairs to be set when running commands in the
// container, or in a container built using an image built from this container.
func (b *Builder) Env() []string {
@@ -277,6 +277,7 @@ return 1
--os
--port
-p
--shell
--user
-u
--volume
@@ -65,6 +65,13 @@ its OS is kept, otherwise the host's OS's name is recorded.
Specifies a *port* to expose when running containers based on any images which
will be built using the specified container.

**--shell** *shell*

Specify the *shell* to run inside of the container image
The shell instruction allows the default shell used for the shell form of commands to be overridden. The default shell for Linux containers is "/bin/sh -c".

Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
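On the implementation side, the flag value documented here is split into an argv-style slice and stored on the builder. The following is a hedged sketch that mirrors the updateConfig hunk earlier in this commit; the function name applyShell is illustrative and the repository import path is an assumption for this era of the project, while shellwords.Parse and SetShell are the calls used in the diff.

```go
package configsketch

import (
	"github.com/mattn/go-shellwords"
	"github.com/projectatomic/buildah" // repository path assumed for this era of the project
	"github.com/sirupsen/logrus"
)

// applyShell mirrors the updateConfig hunk above: the --shell string is split
// with shellwords and stored via SetShell, where it becomes
// Docker.Config.Shell (and is dropped when the image is written as OCIv1).
func applyShell(b *buildah.Builder, flagValue string) {
	shellSpec, err := shellwords.Parse(flagValue) // "/bin/bash -c" -> ["/bin/bash", "-c"]
	if err != nil {
		logrus.Errorf("error parsing --shell %q: %v", flagValue, err)
		return
	}
	b.SetShell(shellSpec)
}
```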

**--user** *user*

Specify the *user* as whom containers based on images which will be built using
@@ -504,6 +504,7 @@ func (b *Executor) Prepare(ib *imagebuilder.Builder, node *parser.Node, from str
		WorkingDir: builder.WorkDir(),
		Entrypoint: builder.Entrypoint(),
		Labels:     builder.Labels(),
		Shell:      builder.Shell(),
	}
	var rootfs *docker.RootFS
	if builder.Docker.RootFS != nil {
@@ -620,6 +621,7 @@ func (b *Executor) Commit(ib *imagebuilder.Builder) (err error) {
	}
	b.builder.SetWorkDir(config.WorkingDir)
	b.builder.SetEntrypoint(config.Entrypoint)
	b.builder.SetShell(config.Shell)
	b.builder.ClearLabels()
	for k, v := range config.Labels {
		b.builder.SetLabel(k, v)
@@ -673,44 +675,19 @@ func (b *Executor) Build(ib *imagebuilder.Builder, node []*parser.Node) (err err
	return nil
}

// BuildReadClosers parses a set of one or more already-opened Dockerfiles,
// creates a new Executor, and then runs Prepare/Execute/Commit/Delete over the
// entire set of instructions.
func BuildReadClosers(store storage.Store, options BuildOptions, dockerfile ...io.ReadCloser) error {
	mainFile := dockerfile[0]
	extraFiles := dockerfile[1:]
	for _, dfile := range dockerfile {
		defer dfile.Close()
	}
	builder, parsed, err := imagebuilder.NewBuilderForReader(mainFile, options.Args)
	if err != nil {
		return errors.Wrapf(err, "error creating builder")
	}
	exec, err := NewExecutor(store, options)
	if err != nil {
		return errors.Wrapf(err, "error creating build executor")
	}
	nodes := []*parser.Node{parsed}
	for _, extra := range extraFiles {
		_, parsed, err := imagebuilder.NewBuilderForReader(extra, options.Args)
		if err != nil {
			return errors.Wrapf(err, "error parsing dockerfile")
		}
		nodes = append(nodes, parsed)
	}
	return exec.Build(builder, nodes)
}

// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
// URLs), creates a new Executor, and then runs Prepare/Execute/Commit/Delete
// over the entire set of instructions.
func BuildDockerfiles(store storage.Store, options BuildOptions, dockerfile ...string) error {
	var dockerfiles []io.ReadCloser
	if len(dockerfile) == 0 {
		return errors.Errorf("error building: no dockerfiles specified")
	}
	nodes := []*parser.Node{}
	for _, dfile := range dockerfile {
		var rc io.ReadCloser
		var (
			parsed *parser.Node
			err    error
		)
		if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
			logrus.Debugf("reading remote Dockerfile %q", dfile)
			resp, err := http.Get(dfile)
@@ -721,32 +698,25 @@ func BuildDockerfiles(store storage.Store, options BuildOptions, dockerfile ...s
				resp.Body.Close()
				return errors.Errorf("no contents in %q", dfile)
			}
			rc = resp.Body
			parsed, err = imagebuilder.ParseDockerfile(resp.Body)
			resp.Body.Close()
		} else {
			if !filepath.IsAbs(dfile) {
				logrus.Debugf("resolving local Dockerfile %q", dfile)
				dfile = filepath.Join(options.ContextDirectory, dfile)
			}
			logrus.Debugf("reading local Dockerfile %q", dfile)
			contents, err := os.Open(dfile)
			if err != nil {
				return errors.Wrapf(err, "error reading %q", dfile)
			}
			dinfo, err := contents.Stat()
			if err != nil {
				contents.Close()
				return errors.Wrapf(err, "error reading info about %q", dfile)
			}
			if dinfo.Size() == 0 {
				contents.Close()
				return errors.Wrapf(err, "no contents in %q", dfile)
			}
			rc = contents
			parsed, err = imagebuilder.ParseFile(dfile)
		}
		dockerfiles = append(dockerfiles, rc)
		if err != nil {
			return errors.Wrapf(err, "unable to parse %s", dfile)
		}
		nodes = append(nodes, parsed)
	}
	if err := BuildReadClosers(store, options, dockerfiles...); err != nil {
		return errors.Wrapf(err, "error building")
	builder := imagebuilder.NewBuilder(options.Args)
	exec, err := NewExecutor(store, options)
	if err != nil {
		return errors.Wrapf(err, "error creating build executor")
	}
	return nil
	return exec.Build(builder, nodes)
}
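Condensed, the reworked BuildDockerfiles above now parses every Dockerfile up front (remote files via http.Get and ParseDockerfile, local files via ParseFile), creates a single Builder, and hands both to one Executor. The sketch below is a trimmed, hedged restatement of that flow for readability, since the hunk interleaves removed and added lines; the function name buildDockerfilesSketch is illustrative, the import paths are assumptions from the vendored tree, and it assumes it sits in the same package as the hunk so BuildOptions and NewExecutor resolve. Logging and the empty-file checks are elided.

```go
package buildah // sketch assumes the same package as the hunk above

import (
	"net/http"
	"path/filepath"
	"strings"

	"github.com/containers/storage"
	"github.com/docker/docker/builder/dockerfile/parser" // parser path assumed from the vendored tree
	"github.com/openshift/imagebuilder"
	"github.com/pkg/errors"
)

// buildDockerfilesSketch is a trimmed restatement of the reworked flow:
// parse every Dockerfile into nodes, then build once with a single Builder.
func buildDockerfilesSketch(store storage.Store, options BuildOptions, dockerfiles ...string) error {
	if len(dockerfiles) == 0 {
		return errors.Errorf("error building: no dockerfiles specified")
	}
	nodes := []*parser.Node{}
	for _, dfile := range dockerfiles {
		var (
			parsed *parser.Node
			err    error
		)
		if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
			resp, getErr := http.Get(dfile) // remote Dockerfile
			if getErr != nil {
				return errors.Wrapf(getErr, "error reading %q", dfile)
			}
			parsed, err = imagebuilder.ParseDockerfile(resp.Body)
			resp.Body.Close()
		} else {
			if !filepath.IsAbs(dfile) {
				dfile = filepath.Join(options.ContextDirectory, dfile)
			}
			parsed, err = imagebuilder.ParseFile(dfile) // local Dockerfile
		}
		if err != nil {
			return errors.Wrapf(err, "unable to parse %s", dfile)
		}
		nodes = append(nodes, parsed)
	}
	builder := imagebuilder.NewBuilder(options.Args) // one Builder for the whole set
	exec, err := NewExecutor(store, options)
	if err != nil {
		return errors.Wrapf(err, "error creating build executor")
	}
	return exec.Build(builder, nodes)
}
```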
run.go: 2 changes
@@ -58,6 +58,8 @@ type RunOptions struct {
	User string
	// WorkingDir is an override for the working directory.
	WorkingDir string
	// Shell is default shell to run in a container.
	Shell string
	// Cmd is an override for the configured default command.
	Cmd []string
	// Entrypoint is an override for the configured entry point.
@@ -262,3 +262,25 @@ load helpers
	[ "$status" -eq 0 ]
	[ "$output" = "" ]
}

@test "bud-shell" {
	target=alpine-image
	buildah bud --format docker --signature-policy ${TESTSDIR}/policy.json -t ${target} ${TESTSDIR}/bud/shell
	run buildah --debug=false inspect --type=image --format '{{printf "%q" .Docker.Config.Shell}}' ${target}
	echo $output
	[ "$status" -eq 0 ]
	[ "$output" = '["/bin/sh" "-c"]' ]
	ctr=$(buildah from ${target})
	run buildah --debug=false config --shell "/bin/bash -c" ${ctr}
	echo $output
	[ "$status" -eq 0 ]
	run buildah --debug=false inspect --type=container --format '{{printf "%q" .Docker.Config.Shell}}' ${ctr}
	echo $output
	[ "$status" -eq 0 ]
	[ "$output" = '["/bin/bash" "-c"]' ]
	buildah rm ${ctr}
	buildah rmi $(buildah --debug=false images -q)
	run buildah --debug=false images -q
	[ "$status" -eq 0 ]
	[ "$output" = "" ]
}
@@ -0,0 +1,3 @@
FROM alpine
SHELL [ "/bin/sh", "-c" ]
@@ -4,7 +4,7 @@ github.com/blang/semver master
github.com/containers/image master
github.com/containers/storage master
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 30eb4d8cdc422b023d5f11f29a82ecb73554183b
github.com/docker/docker b8571fd81c7d2223c9ecbf799c693e3ef1daaea9
github.com/docker/engine-api master
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
@ -0,0 +1,202 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
# continuity
|
||||
|
||||
[](https://godoc.org/github.com/containerd/continuity)
|
||||
[](https://travis-ci.org/containerd/continuity)
|
||||
|
||||
A transport-agnostic, filesystem metadata manifest system
|
||||
|
||||
This project is a staging area for experiments in providing transport agnostic
|
||||
metadata storage.
|
||||
|
||||
Please see https://github.com/opencontainers/specs/issues/11 for more details.
|
||||
|
||||
## Manifest Format
|
||||
|
||||
A continuity manifest encodes filesystem metadata in Protocol Buffers.
|
||||
Please refer to [proto/manifest.proto](proto/manifest.proto).
|
||||
|
||||
## Usage
|
||||
|
||||
Build:
|
||||
|
||||
```console
|
||||
$ make
|
||||
```
|
||||
|
||||
Create a manifest (of this repo itself):
|
||||
|
||||
```console
|
||||
$ ./bin/continuity build . > /tmp/a.pb
|
||||
```
|
||||
|
||||
Dump a manifest:
|
||||
|
||||
```console
|
||||
$ ./bin/continuity ls /tmp/a.pb
|
||||
...
|
||||
-rw-rw-r-- 270 B /.gitignore
|
||||
-rw-rw-r-- 88 B /.mailmap
|
||||
-rw-rw-r-- 187 B /.travis.yml
|
||||
-rw-rw-r-- 359 B /AUTHORS
|
||||
-rw-rw-r-- 11 kB /LICENSE
|
||||
-rw-rw-r-- 1.5 kB /Makefile
|
||||
...
|
||||
-rw-rw-r-- 986 B /testutil_test.go
|
||||
drwxrwxr-x 0 B /version
|
||||
-rw-rw-r-- 478 B /version/version.go
|
||||
```
|
||||
|
||||
Verify a manifest:
|
||||
|
||||
```console
|
||||
$ ./bin/continuity verify . /tmp/a.pb
|
||||
```
|
||||
|
||||
Break the directory and restore using the manifest:
|
||||
```console
|
||||
$ chmod 777 Makefile
|
||||
$ ./bin/continuity verify . /tmp/a.pb
|
||||
2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
|
||||
$ ./bin/continuity apply . /tmp/a.pb
|
||||
$ stat -c %a Makefile
|
||||
664
|
||||
$ ./bin/continuity verify . /tmp/a.pb
|
||||
```
|
||||
|
||||
|
||||
## Contribution Guide
|
||||
### Building Proto Package
|
||||
|
||||
If you change the proto file you will need to rebuild the generated Go with `go generate`.
|
||||
|
||||
```console
|
||||
$ go generate ./proto
|
||||
```
|
|
@ -0,0 +1,85 @@
|
|||
package pathdriver
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// PathDriver provides all of the path manipulation functions in a common
|
||||
// interface. The context should call these and never use the `filepath`
|
||||
// package or any other package to manipulate paths.
|
||||
type PathDriver interface {
|
||||
Join(paths ...string) string
|
||||
IsAbs(path string) bool
|
||||
Rel(base, target string) (string, error)
|
||||
Base(path string) string
|
||||
Dir(path string) string
|
||||
Clean(path string) string
|
||||
Split(path string) (dir, file string)
|
||||
Separator() byte
|
||||
Abs(path string) (string, error)
|
||||
Walk(string, filepath.WalkFunc) error
|
||||
FromSlash(path string) string
|
||||
ToSlash(path string) string
|
||||
Match(pattern, name string) (matched bool, err error)
|
||||
}
|
||||
|
||||
// pathDriver is a simple default implementation calls the filepath package.
|
||||
type pathDriver struct{}
|
||||
|
||||
// LocalPathDriver is the exported pathDriver struct for convenience.
|
||||
var LocalPathDriver PathDriver = &pathDriver{}
|
||||
|
||||
func (*pathDriver) Join(paths ...string) string {
|
||||
return filepath.Join(paths...)
|
||||
}
|
||||
|
||||
func (*pathDriver) IsAbs(path string) bool {
|
||||
return filepath.IsAbs(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Rel(base, target string) (string, error) {
|
||||
return filepath.Rel(base, target)
|
||||
}
|
||||
|
||||
func (*pathDriver) Base(path string) string {
|
||||
return filepath.Base(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Dir(path string) string {
|
||||
return filepath.Dir(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Clean(path string) string {
|
||||
return filepath.Clean(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Split(path string) (dir, file string) {
|
||||
return filepath.Split(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Separator() byte {
|
||||
return filepath.Separator
|
||||
}
|
||||
|
||||
func (*pathDriver) Abs(path string) (string, error) {
|
||||
return filepath.Abs(path)
|
||||
}
|
||||
|
||||
// Note that filepath.Walk calls os.Stat, so if the context wants to
|
||||
// to call Driver.Stat() for Walk, they need to create a new struct that
|
||||
// overrides this method.
|
||||
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
|
||||
return filepath.Walk(root, walkFn)
|
||||
}
|
||||
|
||||
func (*pathDriver) FromSlash(path string) string {
|
||||
return filepath.FromSlash(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) ToSlash(path string) string {
|
||||
return filepath.ToSlash(path)
|
||||
}
|
||||
|
||||
func (*pathDriver) Match(pattern, name string) (bool, error) {
|
||||
return filepath.Match(pattern, name)
|
||||
}
|
|
@ -0,0 +1,13 @@
|
|||
bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
|
||||
github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626
|
||||
github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845
|
||||
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||
github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
|
||||
github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
|
||||
github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b
|
||||
github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489
|
||||
github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
|
||||
golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
|
||||
golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
|
||||
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
|
||||
golang.org/x/sys 665f6529cca930e27b831a0d1dafffbe1c172924
|
|
@ -1,65 +1,11 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
// Common constants for daemon and client.
|
||||
const (
|
||||
// DefaultVersion of Current REST API
|
||||
DefaultVersion string = "1.32"
|
||||
DefaultVersion string = "1.34"
|
||||
|
||||
// NoBaseImageSpecifier is the symbol used by the FROM
|
||||
// command to specify that no base image is to be used.
|
||||
NoBaseImageSpecifier string = "scratch"
|
||||
)
|
||||
|
||||
// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
|
||||
// otherwise generates a new one
|
||||
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
|
||||
err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
|
||||
if err == libtrust.ErrKeyFileDoesNotExist {
|
||||
trustKey, err = libtrust.GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error generating key: %s", err)
|
||||
}
|
||||
encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error serializing key: %s", err)
|
||||
}
|
||||
if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
|
||||
return nil, fmt.Errorf("Error saving key file: %s", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
|
||||
}
|
||||
return trustKey, nil
|
||||
}
|
||||
|
||||
func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
|
||||
if ext == ".json" || ext == ".jwk" {
|
||||
encoded, err = json.Marshal(key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
|
||||
}
|
||||
} else {
|
||||
pemBlock, err := key.PEMBlock()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
|
||||
}
|
||||
encoded = pem.EncodeToMemory(pemBlock)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
package api
|
||||
|
||||
import "regexp"
|
||||
|
||||
// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
|
||||
const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
|
||||
|
||||
// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
|
||||
var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
|
|
@ -181,7 +181,7 @@ type ImageBuildOptions struct {
|
|||
SessionID string
|
||||
|
||||
// TODO @jhowardmsft LCOW Support: This will require extending to include
|
||||
// `Platform string`, but is ommited for now as it's hard-coded temporarily
|
||||
// `Platform string`, but is omitted for now as it's hard-coded temporarily
|
||||
// to avoid API changes.
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Package filters provides helper function to parse and handle command line
|
||||
// filter, used for example in docker ps or docker images commands.
|
||||
/*Package filters provides tools for encoding a mapping of keys to a set of
|
||||
multiple values.
|
||||
*/
|
||||
package filters
|
||||
|
||||
import (
|
||||
|
@ -11,27 +12,34 @@ import (
|
|||
"github.com/docker/docker/api/types/versions"
|
||||
)
|
||||
|
||||
// Args stores filter arguments as map key:{map key: bool}.
|
||||
// It contains an aggregation of the map of arguments (which are in the form
|
||||
// of -f 'key=value') based on the key, and stores values for the same key
|
||||
// in a map with string keys and boolean values.
|
||||
// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
|
||||
// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
|
||||
// Args stores a mapping of keys to a set of multiple values.
|
||||
type Args struct {
|
||||
fields map[string]map[string]bool
|
||||
}
|
||||
|
||||
// NewArgs initializes a new Args struct.
|
||||
func NewArgs() Args {
|
||||
return Args{fields: map[string]map[string]bool{}}
|
||||
// KeyValuePair are used to initialize a new Args
|
||||
type KeyValuePair struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
// ParseFlag parses the argument to the filter flag. Like
|
||||
// Arg creates a new KeyValuePair for initializing Args
|
||||
func Arg(key, value string) KeyValuePair {
|
||||
return KeyValuePair{Key: key, Value: value}
|
||||
}
|
||||
|
||||
// NewArgs returns a new Args populated with the initial args
|
||||
func NewArgs(initialArgs ...KeyValuePair) Args {
|
||||
args := Args{fields: map[string]map[string]bool{}}
|
||||
for _, arg := range initialArgs {
|
||||
args.Add(arg.Key, arg.Value)
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
// ParseFlag parses a key=value string and adds it to an Args.
|
||||
//
|
||||
// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
|
||||
//
|
||||
// If prev map is provided, then it is appended to, and returned. By default a new
|
||||
// map is created.
|
||||
// Deprecated: Use Args.Add()
|
||||
func ParseFlag(arg string, prev Args) (Args, error) {
|
||||
filters := prev
|
||||
if len(arg) == 0 {
|
||||
|
@ -52,74 +60,95 @@ func ParseFlag(arg string, prev Args) (Args, error) {
|
|||
return filters, nil
|
||||
}
|
||||
|
||||
// ErrBadFormat is an error returned in case of bad format for a filter.
|
||||
// ErrBadFormat is an error returned when a filter is not in the form key=value
|
||||
//
|
||||
// Deprecated: this error will be removed in a future version
|
||||
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
|
||||
|
||||
// ToParam packs the Args into a string for easy transport from client to server.
|
||||
// ToParam encodes the Args as args JSON encoded string
|
||||
//
|
||||
// Deprecated: use ToJSON
|
||||
func ToParam(a Args) (string, error) {
|
||||
// this way we don't URL encode {}, just empty space
|
||||
return ToJSON(a)
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte representation of the Args
|
||||
func (args Args) MarshalJSON() ([]byte, error) {
|
||||
if len(args.fields) == 0 {
|
||||
return []byte{}, nil
|
||||
}
|
||||
return json.Marshal(args.fields)
|
||||
}
|
||||
|
||||
// ToJSON returns the Args as a JSON encoded string
|
||||
func ToJSON(a Args) (string, error) {
|
||||
if a.Len() == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(a.fields)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
buf, err := json.Marshal(a)
|
||||
return string(buf), err
|
||||
}
|
||||
|
||||
// ToParamWithVersion packs the Args into a string for easy transport from client to server.
|
||||
// The generated string will depend on the specified version (corresponding to the API version).
|
||||
// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
|
||||
// then the encoded format will use an older legacy format where the values are a
|
||||
// list of strings, instead of a set.
|
||||
//
|
||||
// Deprecated: Use ToJSON
|
||||
func ToParamWithVersion(version string, a Args) (string, error) {
|
||||
// this way we don't URL encode {}, just empty space
|
||||
if a.Len() == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// for daemons older than v1.10, filter must be of the form map[string][]string
|
||||
var buf []byte
|
||||
var err error
|
||||
if version != "" && versions.LessThan(version, "1.22") {
|
||||
buf, err = json.Marshal(convertArgsToSlice(a.fields))
|
||||
} else {
|
||||
buf, err = json.Marshal(a.fields)
|
||||
buf, err := json.Marshal(convertArgsToSlice(a.fields))
|
||||
return string(buf), err
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
|
||||
return ToJSON(a)
|
||||
}
|
||||
|
||||
// FromParam unpacks the filter Args.
|
||||
// FromParam decodes a JSON encoded string into Args
|
||||
//
|
||||
// Deprecated: use FromJSON
|
||||
func FromParam(p string) (Args, error) {
|
||||
if len(p) == 0 {
|
||||
return NewArgs(), nil
|
||||
}
|
||||
|
||||
r := strings.NewReader(p)
|
||||
d := json.NewDecoder(r)
|
||||
|
||||
m := map[string]map[string]bool{}
|
||||
if err := d.Decode(&m); err != nil {
|
||||
r.Seek(0, 0)
|
||||
|
||||
// Allow parsing old arguments in slice format.
|
||||
// Because other libraries might be sending them in this format.
|
||||
deprecated := map[string][]string{}
|
||||
if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
|
||||
m = deprecatedArgs(deprecated)
|
||||
} else {
|
||||
return NewArgs(), err
|
||||
}
|
||||
}
|
||||
return Args{m}, nil
|
||||
return FromJSON(p)
|
||||
}
|
||||
|
||||
// Get returns the list of values associates with a field.
|
||||
// It returns a slice of strings to keep backwards compatibility with old code.
|
||||
func (filters Args) Get(field string) []string {
|
||||
values := filters.fields[field]
|
||||
// FromJSON decodes a JSON encoded string into Args
|
||||
func FromJSON(p string) (Args, error) {
|
||||
args := NewArgs()
|
||||
|
||||
if p == "" {
|
||||
return args, nil
|
||||
}
|
||||
|
||||
raw := []byte(p)
|
||||
err := json.Unmarshal(raw, &args)
|
||||
if err == nil {
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// Fallback to parsing arguments in the legacy slice format
|
||||
deprecated := map[string][]string{}
|
||||
if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
|
||||
return args, err
|
||||
}
|
||||
|
||||
args.fields = deprecatedArgs(deprecated)
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON populates the Args from JSON encode bytes
|
||||
func (args Args) UnmarshalJSON(raw []byte) error {
|
||||
if len(raw) == 0 {
|
||||
return nil
|
||||
}
|
||||
return json.Unmarshal(raw, &args.fields)
|
||||
}
|
||||
|
||||
// Get returns the list of values associated with the key
|
||||
func (args Args) Get(key string) []string {
|
||||
values := args.fields[key]
|
||||
if values == nil {
|
||||
return make([]string, 0)
|
||||
}
|
||||
|
@ -130,37 +159,34 @@ func (filters Args) Get(field string) []string {
|
|||
return slice
|
||||
}
|
||||
|
||||
// Add adds a new value to a filter field.
|
||||
func (filters Args) Add(name, value string) {
|
||||
if _, ok := filters.fields[name]; ok {
|
||||
filters.fields[name][value] = true
|
||||
// Add a new value to the set of values
|
||||
func (args Args) Add(key, value string) {
|
||||
if _, ok := args.fields[key]; ok {
|
||||
args.fields[key][value] = true
|
||||
} else {
|
||||
filters.fields[name] = map[string]bool{value: true}
|
||||
args.fields[key] = map[string]bool{value: true}
|
||||
}
|
||||
}
|
||||
|
||||
// Del removes a value from a filter field.
|
||||
func (filters Args) Del(name, value string) {
|
||||
if _, ok := filters.fields[name]; ok {
|
||||
delete(filters.fields[name], value)
|
||||
if len(filters.fields[name]) == 0 {
|
||||
delete(filters.fields, name)
|
||||
// Del removes a value from the set
|
||||
func (args Args) Del(key, value string) {
|
||||
if _, ok := args.fields[key]; ok {
|
||||
delete(args.fields[key], value)
|
||||
if len(args.fields[key]) == 0 {
|
||||
delete(args.fields, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of fields in the arguments.
|
||||
func (filters Args) Len() int {
|
||||
return len(filters.fields)
|
||||
// Len returns the number of keys in the mapping
|
||||
func (args Args) Len() int {
|
||||
return len(args.fields)
|
||||
}
|
||||
|
||||
// MatchKVList returns true if the values for the specified field matches the ones
|
||||
// from the sources.
|
||||
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
||||
// field is 'label' and sources are {'label1': '1', 'label2': '2'}
|
||||
// it returns true.
|
||||
func (filters Args) MatchKVList(field string, sources map[string]string) bool {
|
||||
fieldValues := filters.fields[field]
|
||||
// MatchKVList returns true if all the pairs in sources exist as key=value
|
||||
// pairs in the mapping at key, or if there are no values at key.
|
||||
func (args Args) MatchKVList(key string, sources map[string]string) bool {
|
||||
fieldValues := args.fields[key]
|
||||
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if len(fieldValues) == 0 {
|
||||
|
@ -171,8 +197,8 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
for name2match := range fieldValues {
|
||||
testKV := strings.SplitN(name2match, "=", 2)
|
||||
for value := range fieldValues {
|
||||
testKV := strings.SplitN(value, "=", 2)
|
||||
|
||||
v, ok := sources[testKV[0]]
|
||||
if !ok {
|
||||
|
@ -186,16 +212,13 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// Match returns true if the values for the specified field matches the source string
|
||||
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
||||
// field is 'image.name' and source is 'ubuntu'
|
||||
// it returns true.
|
||||
func (filters Args) Match(field, source string) bool {
|
||||
if filters.ExactMatch(field, source) {
|
||||
// Match returns true if any of the values at key match the source string
|
||||
func (args Args) Match(field, source string) bool {
|
||||
if args.ExactMatch(field, source) {
|
||||
return true
|
||||
}
|
||||
|
||||
fieldValues := filters.fields[field]
|
||||
fieldValues := args.fields[field]
|
||||
for name2match := range fieldValues {
|
||||
match, err := regexp.MatchString(name2match, source)
|
||||
if err != nil {
|
||||
|
@ -208,9 +231,9 @@ func (filters Args) Match(field, source string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// ExactMatch returns true if the source matches exactly one of the filters.
|
||||
func (filters Args) ExactMatch(field, source string) bool {
|
||||
fieldValues, ok := filters.fields[field]
|
||||
// ExactMatch returns true if the source matches exactly one of the values.
|
||||
func (args Args) ExactMatch(key, source string) bool {
|
||||
fieldValues, ok := args.fields[key]
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if !ok || len(fieldValues) == 0 {
|
||||
return true
|
||||
|
@ -220,14 +243,15 @@ func (filters Args) ExactMatch(field, source string) bool {
|
|||
return fieldValues[source]
|
||||
}
|
||||
|
||||
// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
|
||||
func (filters Args) UniqueExactMatch(field, source string) bool {
|
||||
fieldValues := filters.fields[field]
|
||||
// UniqueExactMatch returns true if there is only one value and the source
|
||||
// matches exactly the value.
|
||||
func (args Args) UniqueExactMatch(key, source string) bool {
|
||||
fieldValues := args.fields[key]
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if len(fieldValues) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(filters.fields[field]) != 1 {
|
||||
if len(args.fields[key]) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -235,14 +259,14 @@ func (filters Args) UniqueExactMatch(field, source string) bool {
|
|||
return fieldValues[source]
|
||||
}
|
||||
|
||||
// FuzzyMatch returns true if the source matches exactly one of the filters,
|
||||
// or the source has one of the filters as a prefix.
|
||||
func (filters Args) FuzzyMatch(field, source string) bool {
|
||||
if filters.ExactMatch(field, source) {
|
||||
// FuzzyMatch returns true if the source matches exactly one value, or the
|
||||
// source has one of the values as a prefix.
|
||||
func (args Args) FuzzyMatch(key, source string) bool {
|
||||
if args.ExactMatch(key, source) {
|
||||
return true
|
||||
}
|
||||
|
||||
fieldValues := filters.fields[field]
|
||||
fieldValues := args.fields[key]
|
||||
for prefix := range fieldValues {
|
||||
if strings.HasPrefix(source, prefix) {
|
||||
return true
|
||||
|
@ -251,9 +275,17 @@ func (filters Args) FuzzyMatch(field, source string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// Include returns true if the name of the field to filter is in the filters.
|
||||
func (filters Args) Include(field string) bool {
|
||||
_, ok := filters.fields[field]
|
||||
// Include returns true if the key exists in the mapping
|
||||
//
|
||||
// Deprecated: use Contains
|
||||
func (args Args) Include(field string) bool {
|
||||
_, ok := args.fields[field]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Contains returns true if the key exists in the mapping
|
||||
func (args Args) Contains(field string) bool {
|
||||
_, ok := args.fields[field]
|
||||
return ok
|
||||
}
|
||||
|
||||
|
@ -265,10 +297,10 @@ func (e invalidFilter) Error() string {
|
|||
|
||||
func (invalidFilter) InvalidParameter() {}
|
||||
|
||||
// Validate ensures that all the fields in the filter are valid.
|
||||
// It returns an error as soon as it finds an invalid field.
|
||||
func (filters Args) Validate(accepted map[string]bool) error {
|
||||
for name := range filters.fields {
|
||||
// Validate compared the set of accepted keys against the keys in the mapping.
|
||||
// An error is returned if any mapping keys are not in the accepted set.
|
||||
func (args Args) Validate(accepted map[string]bool) error {
|
||||
for name := range args.fields {
|
||||
if !accepted[name] {
|
||||
return invalidFilter(name)
|
||||
}
|
||||
|
@ -276,21 +308,14 @@ func (filters Args) Validate(accepted map[string]bool) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
type invalidFilterError string
|
||||
|
||||
func (e invalidFilterError) Error() string {
|
||||
return "Invalid filter: '" + string(e) + "'"
|
||||
}
|
||||
|
||||
func (invalidFilterError) InvalidParameter() {}
|
||||
|
||||
// WalkValues iterates over the list of filtered values for a field.
|
||||
// It stops the iteration if it finds an error and it returns that error.
|
||||
func (filters Args) WalkValues(field string, op func(value string) error) error {
|
||||
if _, ok := filters.fields[field]; !ok {
|
||||
// WalkValues iterates over the list of values for a key in the mapping and calls
|
||||
// op() for each value. If op returns an error the iteration stops and the
|
||||
// error is returned.
|
||||
func (args Args) WalkValues(field string, op func(value string) error) error {
|
||||
if _, ok := args.fields[field]; !ok {
|
||||
return nil
|
||||
}
|
||||
for v := range filters.fields[field] {
|
||||
for v := range args.fields[field] {
|
||||
if err := op(v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -67,7 +67,7 @@ var Propagations = []Propagation{
|
|||
type Consistency string
|
||||
|
||||
const (
|
||||
// ConsistencyFull guarantees bind-mount-like consistency
|
||||
// ConsistencyFull guarantees bind mount-like consistency
|
||||
ConsistencyFull Consistency = "consistent"
|
||||
// ConsistencyCached mounts can cache read data and FS structure
|
||||
ConsistencyCached Consistency = "cached"
|
||||
|
|
|
@ -29,10 +29,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
|
|||
}
|
||||
|
||||
var format string
|
||||
var parseInLocation bool
|
||||
|
||||
// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
|
||||
parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
|
||||
parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
|
||||
|
||||
if strings.Contains(value, ".") {
|
||||
if parseInLocation {
|
||||
|
|
|
@ -19,7 +19,7 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.")
|
||||
errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@ -143,7 +143,7 @@ func (d *Directive) possibleParserDirective(line string) error {
|
|||
if len(tecMatch) != 0 {
|
||||
for i, n := range tokenEscapeCommand.SubexpNames() {
|
||||
if n == "escapechar" {
|
||||
if d.escapeSeen == true {
|
||||
if d.escapeSeen {
|
||||
return errors.New("only one escape parser directive can be used")
|
||||
}
|
||||
d.escapeSeen = true
|
||||
|
@ -159,7 +159,7 @@ func (d *Directive) possibleParserDirective(line string) error {
|
|||
if len(tpcMatch) != 0 {
|
||||
for i, n := range tokenPlatformCommand.SubexpNames() {
|
||||
if n == "platform" {
|
||||
if d.platformSeen == true {
|
||||
if d.platformSeen {
|
||||
return errors.New("only one platform parser directive can be used")
|
||||
}
|
||||
d.platformSeen = true
|
||||
|
@ -290,6 +290,10 @@ func Parse(rwc io.Reader) (*Result, error) {
|
|||
}
|
||||
currentLine++
|
||||
|
||||
if isComment(scanner.Bytes()) {
|
||||
// original line was a comment (processLine strips comments)
|
||||
continue
|
||||
}
|
||||
if isEmptyContinuationLine(bytesRead) {
|
||||
hasEmptyContinuationLine = true
|
||||
continue
|
||||
|
@ -331,8 +335,12 @@ func trimWhitespace(src []byte) []byte {
|
|||
return bytes.TrimLeftFunc(src, unicode.IsSpace)
|
||||
}
|
||||
|
||||
func isComment(line []byte) bool {
|
||||
return tokenComment.Match(trimWhitespace(line))
|
||||
}
|
||||
|
||||
func isEmptyContinuationLine(line []byte) bool {
|
||||
return len(trimComments(trimWhitespace(line))) == 0
|
||||
return len(trimWhitespace(line)) == 0
|
||||
}
|
||||
|
||||
var utf8bom = []byte{0xEF, 0xBB, 0xBF}
|
||||
|
|
|
@ -2,7 +2,6 @@ package client
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
|
@ -20,10 +19,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options
|
|||
|
||||
resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
|
||||
if err != nil {
|
||||
if resp.statusCode == http.StatusNotFound {
|
||||
return checkpoints, containerNotFoundError{container}
|
||||
}
|
||||
return checkpoints, err
|
||||
return checkpoints, wrapResponseError(err, resp, "container", container)
|
||||
}
|
||||
|
||||
err = json.NewDecoder(resp.body).Decode(&checkpoints)
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
/*
|
||||
Package client is a Go client for the Docker Engine API.
|
||||
|
||||
The "docker" command uses this package to communicate with the daemon. It can also
|
||||
be used by your own Go applications to do anything the command-line interface does
|
||||
- running containers, pulling images, managing swarms, etc.
|
||||
|
||||
For more information about the Engine API, see the documentation:
|
||||
https://docs.docker.com/engine/reference/api/
|
||||
|
||||
|
@ -160,7 +156,7 @@ func NewEnvClient() (*Client, error) {
|
|||
// highly recommended that you set a version or your client may break if the
|
||||
// server is upgraded.
|
||||
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
|
||||
proto, addr, basePath, err := ParseHost(host)
|
||||
hostURL, err := ParseHostURL(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -171,7 +167,7 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map
|
|||
}
|
||||
} else {
|
||||
transport := new(http.Transport)
|
||||
sockets.ConfigureTransport(transport, proto, addr)
|
||||
sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
|
||||
client = &http.Client{
|
||||
Transport: transport,
|
||||
CheckRedirect: CheckRedirect,
|
||||
|
@ -189,28 +185,24 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map
|
|||
scheme = "https"
|
||||
}
|
||||
|
||||
// TODO: store URL instead of proto/addr/basePath
|
||||
return &Client{
|
||||
scheme: scheme,
|
||||
host: host,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
basePath: basePath,
|
||||
proto: hostURL.Scheme,
|
||||
addr: hostURL.Host,
|
||||
basePath: hostURL.Path,
|
||||
client: client,
|
||||
version: version,
|
||||
customHTTPHeaders: httpHeaders,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close ensures that transport.Client is closed
|
||||
// especially needed while using NewClient with *http.Client = nil
|
||||
// for example
|
||||
// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"})
|
||||
// Close the transport used by the client
|
||||
func (cli *Client) Close() error {
|
||||
|
||||
if t, ok := cli.client.Transport.(*http.Transport); ok {
|
||||
t.CloseIdleConnections()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -220,37 +212,27 @@ func (cli *Client) getAPIPath(p string, query url.Values) string {
|
|||
var apiPath string
|
||||
if cli.version != "" {
|
||||
v := strings.TrimPrefix(cli.version, "v")
|
||||
apiPath = path.Join(cli.basePath, "/v"+v+p)
|
||||
apiPath = path.Join(cli.basePath, "/v"+v, p)
|
||||
} else {
|
||||
apiPath = path.Join(cli.basePath, p)
|
||||
}
|
||||
|
||||
u := &url.URL{
|
||||
Path: apiPath,
|
||||
}
|
||||
if len(query) > 0 {
|
||||
u.RawQuery = query.Encode()
|
||||
}
|
||||
return u.String()
|
||||
return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
|
||||
}
|
||||
|
||||
// ClientVersion returns the version string associated with this
|
||||
// instance of the Client. Note that this value can be changed
|
||||
// via the DOCKER_API_VERSION env var.
|
||||
// This operation doesn't acquire a mutex.
|
||||
// ClientVersion returns the API version used by this client.
|
||||
func (cli *Client) ClientVersion() string {
|
||||
return cli.version
|
||||
}
|
||||
|
||||
// NegotiateAPIVersion updates the version string associated with this
|
||||
// instance of the Client to match the latest version the server supports
// NegotiateAPIVersion queries the API and updates the version to match the
// API version. Any errors are silently ignored.
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
	ping, _ := cli.Ping(ctx)
	cli.NegotiateAPIVersionPing(ping)
}

// NegotiateAPIVersionPing updates the version string associated with this
// instance of the Client to match the latest version the server supports
// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
// if the ping version is less than the default version.
func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
	if cli.manualOverride {
		return

@ -266,23 +248,34 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
		cli.version = api.DefaultVersion
	}

	// if server version is lower than the maximum version supported by the Client, downgrade
	if versions.LessThan(p.APIVersion, api.DefaultVersion) {
	// if server version is lower than the client version, downgrade
	if versions.LessThan(p.APIVersion, cli.version) {
		cli.version = p.APIVersion
	}
}

// DaemonHost returns the host associated with this instance of the Client.
// This operation doesn't acquire a mutex.
// DaemonHost returns the host address used by the client
func (cli *Client) DaemonHost() string {
	return cli.host
}

// ParseHost verifies that the given host strings is valid.
// ParseHost parses a url string, validates the strings is a host url, and returns
// the parsed host as: protocol, address, and base path
// Deprecated: use ParseHostURL
func ParseHost(host string) (string, string, string, error) {
	hostURL, err := ParseHostURL(host)
	if err != nil {
		return "", "", "", err
	}
	return hostURL.Scheme, hostURL.Host, hostURL.Path, nil
}

// ParseHostURL parses a url string, validates the string is a host url, and
// returns the parsed URL
func ParseHostURL(host string) (*url.URL, error) {
	protoAddrParts := strings.SplitN(host, "://", 2)
	if len(protoAddrParts) == 1 {
		return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
		return nil, fmt.Errorf("unable to parse docker host `%s`", host)
	}

	var basePath string

@ -290,16 +283,19 @@ func ParseHost(host string) (string, string, string, error) {
	if proto == "tcp" {
		parsed, err := url.Parse("tcp://" + addr)
		if err != nil {
			return "", "", "", err
			return nil, err
		}
		addr = parsed.Host
		basePath = parsed.Path
	}
	return proto, addr, basePath, nil
	return &url.URL{
		Scheme: proto,
		Host:   addr,
		Path:   basePath,
	}, nil
}

// CustomHTTPHeaders returns the custom http headers associated with this
// instance of the Client. This operation doesn't acquire a mutex.
// CustomHTTPHeaders returns the custom http headers stored by the client.
func (cli *Client) CustomHTTPHeaders() map[string]string {
	m := make(map[string]string)
	for k, v := range cli.customHTTPHeaders {

@ -308,8 +304,7 @@ func (cli *Client) CustomHTTPHeaders() map[string]string {
	return m
}

// SetCustomHTTPHeaders updates the custom http headers associated with this
// instance of the Client. This operation doesn't acquire a mutex.
// SetCustomHTTPHeaders that will be set on every HTTP request made by the client.
func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
	cli.customHTTPHeaders = headers
}
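The hunks above add `ParseHostURL` and turn `ParseHost` into a deprecated wrapper around it. As an illustrative sketch (not part of the vendored diff), assuming the vendored `github.com/docker/docker/client` package is on the import path, the new helper can be exercised like this:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/client"
    )

    func main() {
    	// Scheme/Host/Path on the returned *url.URL carry what ParseHost used to
    	// return as proto, addr and base path.
    	u, err := client.ParseHostURL("tcp://127.0.0.1:2376")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(u.Scheme, u.Host, u.Path)
    }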
@ -4,7 +4,6 @@ import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"

	"github.com/docker/docker/api/types/swarm"
	"golang.org/x/net/context"

@ -17,10 +16,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C
	}
	resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
	if err != nil {
		if resp.statusCode == http.StatusNotFound {
			return swarm.Config{}, nil, configNotFoundError{id}
		}
		return swarm.Config{}, nil, err
		return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
	}
	defer ensureReaderClosed(resp)

@ -18,7 +18,7 @@ func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptio
	query := url.Values{}

	if options.Filters.Len() > 0 {
		filterJSON, err := filters.ToParam(options.Filters)
		filterJSON, err := filters.ToJSON(options.Filters)
		if err != nil {
			return nil, err
		}

@ -9,5 +9,5 @@ func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
	}
	resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
	ensureReaderClosed(resp)
	return err
	return wrapResponseError(err, resp, "config", id)
}

@ -39,7 +39,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option
	for _, change := range options.Changes {
		query.Add("changes", change)
	}
	if options.Pause != true {
	if !options.Pause {
		query.Set("pause", "0")
	}

@ -45,7 +45,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
	serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
	if err != nil {
		if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
			return response, imageNotFoundError{config.Image}
			return response, objectNotFoundError{object: "image", id: config.Image}
		}
		return response, err
	}

@ -4,7 +4,6 @@ import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"net/url"

	"github.com/docker/docker/api/types"

@ -15,10 +14,7 @@ import (
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
	if err != nil {
		if serverResp.statusCode == http.StatusNotFound {
			return types.ContainerJSON{}, containerNotFoundError{containerID}
		}
		return types.ContainerJSON{}, err
		return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID)
	}

	var response types.ContainerJSON

@ -35,10 +31,7 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri
	}
	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
	if err != nil {
		if serverResp.statusCode == http.StatusNotFound {
			return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
		}
		return types.ContainerJSON{}, nil, err
		return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
	}
	defer ensureReaderClosed(serverResp)

@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti

	resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
	ensureReaderClosed(resp)
	return err
	return wrapResponseError(err, resp, "container", containerID)
}

@ -3,6 +3,8 @@ package client
import (
	"fmt"

	"net/http"

	"github.com/docker/docker/api/types/versions"
	"github.com/pkg/errors"
)

@ -36,95 +38,37 @@ type notFound interface {
	NotFound() bool // Is the error a NotFound error
}

// IsErrNotFound returns true if the error is caused with an
// object (image, container, network, volume, …) is not found in the docker host.
// IsErrNotFound returns true if the error is a NotFound error, which is returned
// by the API when some object is not found.
func IsErrNotFound(err error) bool {
	te, ok := err.(notFound)
	return ok && te.NotFound()
}

// imageNotFoundError implements an error returned when an image is not in the docker host.
type imageNotFoundError struct {
	imageID string
type objectNotFoundError struct {
	object string
	id     string
}

// NotFound indicates that this error type is of NotFound
func (e imageNotFoundError) NotFound() bool {
func (e objectNotFoundError) NotFound() bool {
	return true
}

// Error returns a string representation of an imageNotFoundError
func (e imageNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such image: %s", e.imageID)
func (e objectNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
}

// IsErrImageNotFound returns true if the error is caused
// when an image is not found in the docker host.
func IsErrImageNotFound(err error) bool {
	return IsErrNotFound(err)
}

// containerNotFoundError implements an error returned when a container is not in the docker host.
type containerNotFoundError struct {
	containerID string
}

// NotFound indicates that this error type is of NotFound
func (e containerNotFoundError) NotFound() bool {
	return true
}

// Error returns a string representation of a containerNotFoundError
func (e containerNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such container: %s", e.containerID)
}

// IsErrContainerNotFound returns true if the error is caused
// when a container is not found in the docker host.
func IsErrContainerNotFound(err error) bool {
	return IsErrNotFound(err)
}

// networkNotFoundError implements an error returned when a network is not in the docker host.
type networkNotFoundError struct {
	networkID string
}

// NotFound indicates that this error type is of NotFound
func (e networkNotFoundError) NotFound() bool {
	return true
}

// Error returns a string representation of a networkNotFoundError
func (e networkNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such network: %s", e.networkID)
}

// IsErrNetworkNotFound returns true if the error is caused
// when a network is not found in the docker host.
func IsErrNetworkNotFound(err error) bool {
	return IsErrNotFound(err)
}

// volumeNotFoundError implements an error returned when a volume is not in the docker host.
type volumeNotFoundError struct {
	volumeID string
}

// NotFound indicates that this error type is of NotFound
func (e volumeNotFoundError) NotFound() bool {
	return true
}

// Error returns a string representation of a volumeNotFoundError
func (e volumeNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
}

// IsErrVolumeNotFound returns true if the error is caused
// when a volume is not found in the docker host.
func IsErrVolumeNotFound(err error) bool {
	return IsErrNotFound(err)
func wrapResponseError(err error, resp serverResponse, object, id string) error {
	switch {
	case err == nil:
		return nil
	case resp.statusCode == http.StatusNotFound:
		return objectNotFoundError{object: object, id: id}
	case resp.statusCode == http.StatusNotImplemented:
		return notImplementedError{message: err.Error()}
	default:
		return err
	}
}

// unauthorizedError represents an authorization error in a remote registry.

@ -144,72 +88,6 @@ func IsErrUnauthorized(err error) bool {
	return ok
}

// nodeNotFoundError implements an error returned when a node is not found.
type nodeNotFoundError struct {
	nodeID string
}

// Error returns a string representation of a nodeNotFoundError
func (e nodeNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such node: %s", e.nodeID)
}

// NotFound indicates that this error type is of NotFound
func (e nodeNotFoundError) NotFound() bool {
	return true
}

// IsErrNodeNotFound returns true if the error is caused
// when a node is not found.
func IsErrNodeNotFound(err error) bool {
	_, ok := err.(nodeNotFoundError)
	return ok
}

// serviceNotFoundError implements an error returned when a service is not found.
type serviceNotFoundError struct {
	serviceID string
}

// Error returns a string representation of a serviceNotFoundError
func (e serviceNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such service: %s", e.serviceID)
}

// NotFound indicates that this error type is of NotFound
func (e serviceNotFoundError) NotFound() bool {
	return true
}

// IsErrServiceNotFound returns true if the error is caused
// when a service is not found.
func IsErrServiceNotFound(err error) bool {
	_, ok := err.(serviceNotFoundError)
	return ok
}

// taskNotFoundError implements an error returned when a task is not found.
type taskNotFoundError struct {
	taskID string
}

// Error returns a string representation of a taskNotFoundError
func (e taskNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such task: %s", e.taskID)
}

// NotFound indicates that this error type is of NotFound
func (e taskNotFoundError) NotFound() bool {
	return true
}

// IsErrTaskNotFound returns true if the error is caused
// when a task is not found.
func IsErrTaskNotFound(err error) bool {
	_, ok := err.(taskNotFoundError)
	return ok
}

type pluginPermissionDenied struct {
	name string
}

@ -225,6 +103,26 @@ func IsErrPluginPermissionDenied(err error) bool {
	return ok
}

type notImplementedError struct {
	message string
}

func (e notImplementedError) Error() string {
	return e.message
}

func (e notImplementedError) NotImplemented() bool {
	return true
}

// IsErrNotImplemented returns true if the error is a NotImplemented error.
// This is returned by the API when a requested feature has not been
// implemented.
func IsErrNotImplemented(err error) bool {
	te, ok := err.(notImplementedError)
	return ok && te.NotImplemented()
}

// NewVersionError returns an error if the APIVersion required
// if less than the current supported version
func (cli *Client) NewVersionError(APIrequired, feature string) error {

@ -233,68 +131,3 @@ func (cli *Client) NewVersionError(APIrequired, feature string) error {
	}
	return nil
}

// secretNotFoundError implements an error returned when a secret is not found.
type secretNotFoundError struct {
	name string
}

// Error returns a string representation of a secretNotFoundError
func (e secretNotFoundError) Error() string {
	return fmt.Sprintf("Error: no such secret: %s", e.name)
}

// NotFound indicates that this error type is of NotFound
func (e secretNotFoundError) NotFound() bool {
	return true
}

// IsErrSecretNotFound returns true if the error is caused
// when a secret is not found.
func IsErrSecretNotFound(err error) bool {
	_, ok := err.(secretNotFoundError)
	return ok
}

// configNotFoundError implements an error returned when a config is not found.
type configNotFoundError struct {
	name string
}

// Error returns a string representation of a configNotFoundError
func (e configNotFoundError) Error() string {
	return fmt.Sprintf("Error: no such config: %s", e.name)
}

// NotFound indicates that this error type is of NotFound
func (e configNotFoundError) NotFound() bool {
	return true
}

// IsErrConfigNotFound returns true if the error is caused
// when a config is not found.
func IsErrConfigNotFound(err error) bool {
	_, ok := err.(configNotFoundError)
	return ok
}

// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
type pluginNotFoundError struct {
	name string
}

// NotFound indicates that this error type is of NotFound
func (e pluginNotFoundError) NotFound() bool {
	return true
}

// Error returns a string representation of a pluginNotFoundError
func (e pluginNotFoundError) Error() string {
	return fmt.Sprintf("Error: No such plugin: %s", e.name)
}

// IsErrPluginNotFound returns true if the error is caused
// when a plugin is not found in the docker host.
func IsErrPluginNotFound(err error) bool {
	return IsErrNotFound(err)
}
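With the per-object error types above collapsed into `objectNotFoundError` and `wrapResponseError`, callers are expected to lean on the generic `IsErrNotFound` predicate. A minimal sketch (not part of the vendored diff), assuming a reachable Docker daemon and a placeholder container name:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		panic(err)
    	}
    	// ContainerInspect now surfaces a 404 through wrapResponseError, so the
    	// generic predicate is enough to detect a missing container.
    	_, err = cli.ContainerInspect(context.Background(), "no-such-container")
    	if client.IsErrNotFound(err) {
    		fmt.Println("container not found")
    	}
    }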
@ -12,7 +12,6 @@ import (
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/pkg/tlsconfig"
	"github.com/docker/go-connections/sockets"
	"github.com/pkg/errors"
	"golang.org/x/net/context"

@ -71,7 +70,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
	timeout := dialer.Timeout

	if !dialer.Deadline.IsZero() {
		deadlineTimeout := dialer.Deadline.Sub(time.Now())
		deadlineTimeout := time.Until(dialer.Deadline)
		if timeout == 0 || deadlineTimeout < timeout {
			timeout = deadlineTimeout
		}

@ -115,7 +114,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
	// from the hostname we're connecting to.
	if config.ServerName == "" {
		// Make a copy to avoid polluting argument or default.
		config = tlsconfig.Clone(config)
		config = tlsConfigClone(config)
		config.ServerName = hostname
	}

@ -4,7 +4,6 @@ import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"

	"github.com/docker/docker/api/types"
	"golang.org/x/net/context"

@ -14,10 +13,7 @@ import (
func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
	serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
	if err != nil {
		if serverResp.statusCode == http.StatusNotFound {
			return types.ImageInspect{}, nil, imageNotFoundError{imageID}
		}
		return types.ImageInspect{}, nil, err
		return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID)
	}
	defer ensureReaderClosed(serverResp)

@ -2,7 +2,6 @@ package client

import (
	"encoding/json"
	"net/http"
	"net/url"

	"github.com/docker/docker/api/types"

@ -20,15 +19,12 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type
		query.Set("noprune", "1")
	}

	var dels []types.ImageDeleteResponseItem
	resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
	if err != nil {
		if resp.statusCode == http.StatusNotFound {
			return nil, imageNotFoundError{imageID}
		}
		return nil, err
		return dels, wrapResponseError(err, resp, "image", imageID)
	}

	var dels []types.ImageDeleteResponseItem
	err = json.NewDecoder(resp.body).Decode(&dels)
	ensureReaderClosed(resp)
	return dels, err

@ -21,7 +21,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I
	query.Set("limit", fmt.Sprintf("%d", options.Limit))

	if options.Filters.Len() > 0 {
		filterJSON, err := filters.ToParam(options.Filters)
		filterJSON, err := filters.ToJSON(options.Filters)
		if err != nil {
			return results, err
		}

@ -4,7 +4,6 @@ import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"net/url"

	"github.com/docker/docker/api/types"

@ -33,10 +32,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string,
	}
	resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
	if err != nil {
		if resp.statusCode == http.StatusNotFound {
			return networkResource, nil, networkNotFoundError{networkID}
		}
		return networkResource, nil, err
		return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
	}
	defer ensureReaderClosed(resp)

@ -6,5 +6,5 @@ import "golang.org/x/net/context"
func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
	resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
	ensureReaderClosed(resp)
	return err
	return wrapResponseError(err, resp, "network", networkID)
}

@ -4,7 +4,6 @@ import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"

	"github.com/docker/docker/api/types/swarm"
	"golang.org/x/net/context"

@ -14,10 +13,7 @@ import (
func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
	serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
	if err != nil {
		if serverResp.statusCode == http.StatusNotFound {
			return swarm.Node{}, nil, nodeNotFoundError{nodeID}
		}
		return swarm.Node{}, nil, err
		return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
	}
	defer ensureReaderClosed(serverResp)

@ -15,7 +15,7 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions)
	query := url.Values{}

	if options.Filters.Len() > 0 {
		filterJSON, err := filters.ToParam(options.Filters)
		filterJSON, err := filters.ToJSON(options.Filters)

		if err != nil {
			return nil, err

@ -17,5 +17,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.

	resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
	ensureReaderClosed(resp)
	return err
	return wrapResponseError(err, resp, "node", nodeID)
}

@ -1,41 +0,0 @@
package client

// parse_logs.go contains utility helpers for getting information out of docker
// log lines. really, it only contains ParseDetails right now. maybe in the
// future there will be some desire to parse log messages back into a struct?
// that would go here if we did

import (
	"net/url"
	"strings"

	"github.com/pkg/errors"
)

// ParseLogDetails takes a details string of key value pairs in the form
// "k=v,l=w", where the keys and values are url query escaped, and each pair
// is separated by a comma, returns a map. returns an error if the details
// string is not in a valid format
// the exact form of details encoding is implemented in
// api/server/httputils/write_log_stream.go
func ParseLogDetails(details string) (map[string]string, error) {
	pairs := strings.Split(details, ",")
	detailsMap := make(map[string]string, len(pairs))
	for _, pair := range pairs {
		p := strings.SplitN(pair, "=", 2)
		// if there is no equals sign, we will only get 1 part back
		if len(p) != 2 {
			return nil, errors.New("invalid details format")
		}
		k, err := url.QueryUnescape(p[0])
		if err != nil {
			return nil, err
		}
		v, err := url.QueryUnescape(p[1])
		if err != nil {
			return nil, err
		}
		detailsMap[k] = v
	}
	return detailsMap, nil
}

@ -28,7 +28,5 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
		}
		ping.OSType = serverResp.header.Get("OSType")
	}

	err = cli.checkResponseErr(serverResp)
	return ping, err
	return ping, cli.checkResponseErr(serverResp)
}
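The `Ping` change above feeds the version-negotiation path shown at the top of this diff. As a hedged sketch (not part of the vendored diff), assuming a reachable daemon:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		panic(err)
    	}
    	// NegotiateAPIVersion pings the daemon and downgrades the client version
    	// when the server reports an older API than the client default.
    	cli.NegotiateAPIVersion(context.Background())
    	fmt.Println(cli.ClientVersion())
    }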
@ -4,7 +4,6 @@ import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"

	"github.com/docker/docker/api/types"
	"golang.org/x/net/context"

@ -14,10 +13,7 @@ import (
func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
	resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
	if err != nil {
		if resp.statusCode == http.StatusNotFound {
			return nil, nil, pluginNotFoundError{name}
		}
		return nil, nil, err
		return nil, nil, wrapResponseError(err, resp, "plugin", name)
	}

	defer ensureReaderClosed(resp)

@ -23,7 +23,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P
	}
	resp, err := cli.get(ctx, "/plugins", query, nil)
	if err != nil {
		return plugins, err
		return plugins, wrapResponseError(err, resp, "plugin", "")
	}

	err = json.NewDecoder(resp.body).Decode(&plugins)

@ -16,5 +16,5 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types.

	resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
	ensureReaderClosed(resp)
	return err
	return wrapResponseError(err, resp, "plugin", name)
}

@ -203,7 +203,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error {
		return err
	}
	if len(body) == 0 {
		return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
		return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
	}

	var ct string

@ -4,7 +4,6 @@ import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"

	"github.com/docker/docker/api/types/swarm"
	"golang.org/x/net/context"

@ -17,10 +16,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S
	}
	resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
	if err != nil {
		if resp.statusCode == http.StatusNotFound {
			return swarm.Secret{}, nil, secretNotFoundError{id}
		}
		return swarm.Secret{}, nil, err
		return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
	}
	defer ensureReaderClosed(resp)

@ -18,7 +18,7 @@ func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptio
	query := url.Values{}

	if options.Filters.Len() > 0 {
		filterJSON, err := filters.ToParam(options.Filters)
		filterJSON, err := filters.ToJSON(options.Filters)
		if err != nil {
			return nil, err
		}

@ -9,5 +9,5 @@ func (cli *Client) SecretRemove(ctx context.Context, id string) error {
	}
	resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
	ensureReaderClosed(resp)
	return err
	return wrapResponseError(err, resp, "secret", id)
}

@ -3,11 +3,12 @@ package client
import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/opencontainers/go-digest"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

@ -85,21 +86,30 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec,
	return response, err
}

func imageDigestAndPlatforms(ctx context.Context, cli *Client, image, encodedAuth string) (string, []swarm.Platform, error) {
func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
	distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
	imageWithDigest := image
	var platforms []swarm.Platform
	if err != nil {
		return "", nil, err
	}

	imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest)
	imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)

	if len(distributionInspect.Platforms) > 0 {
		platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
		for _, p := range distributionInspect.Platforms {
			// clear architecture field for arm. This is a temporary patch to address
			// https://github.com/docker/swarmkit/issues/2294. The issue is that while
			// image manifests report "arm" as the architecture, the node reports
			// something like "armv7l" (includes the variant), which causes arm images
			// to stop working with swarm mode. This patch removes the architecture
			// constraint for arm images to ensure tasks get scheduled.
			arch := p.Architecture
			if strings.ToLower(arch) == "arm" {
				arch = ""
			}
			platforms = append(platforms, swarm.Platform{
				Architecture: p.Architecture,
				Architecture: arch,
				OS:           p.OS,
			})
		}

@ -5,7 +5,6 @@ import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"

	"github.com/docker/docker/api/types"

@ -19,10 +18,7 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string,
	query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
	serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
	if err != nil {
		if serverResp.statusCode == http.StatusNotFound {
			return swarm.Service{}, nil, serviceNotFoundError{serviceID}
		}
		return swarm.Service{}, nil, err
		return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
	}
	defer ensureReaderClosed(serverResp)

@ -15,7 +15,7 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt
	query := url.Values{}

	if options.Filters.Len() > 0 {
		filterJSON, err := filters.ToParam(options.Filters)
		filterJSON, err := filters.ToJSON(options.Filters)
		if err != nil {
			return nil, err
		}

@ -6,5 +6,5 @@ import "golang.org/x/net/context"
func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
	resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
	ensureReaderClosed(resp)
	return err
	return wrapResponseError(err, resp, "service", serviceID)
}

@ -4,10 +4,8 @@ import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"

	"github.com/docker/docker/api/types/swarm"

	"golang.org/x/net/context"
)

@ -15,10 +13,7 @@ import (
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
	serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
	if err != nil {
		if serverResp.statusCode == http.StatusNotFound {
			return swarm.Task{}, nil, taskNotFoundError{taskID}
		}
		return swarm.Task{}, nil, err
		return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
	}
	defer ensureReaderClosed(serverResp)

@ -15,7 +15,7 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions)
	query := url.Values{}

	if options.Filters.Len() > 0 {
		filterJSON, err := filters.ToParam(options.Filters)
		filterJSON, err := filters.ToJSON(options.Filters)
		if err != nil {
			return nil, err
		}

@ -0,0 +1,11 @@
// +build go1.8

package client

import "crypto/tls"

// tlsConfigClone returns a clone of tls.Config. This function is provided for
// compatibility for go1.7 that doesn't include this method in stdlib.
func tlsConfigClone(c *tls.Config) *tls.Config {
	return c.Clone()
}

@ -1,12 +1,12 @@
// +build go1.7,!go1.8

package tlsconfig
package client

import "crypto/tls"

// Clone returns a clone of tls.Config. This function is provided for
// tlsConfigClone returns a clone of tls.Config. This function is provided for
// compatibility for go1.7 that doesn't include this method in stdlib.
func Clone(c *tls.Config) *tls.Config {
func tlsConfigClone(c *tls.Config) *tls.Config {
	return &tls.Config{
		Rand: c.Rand,
		Time: c.Time,

@ -5,14 +5,6 @@ import (
	"net/http"
)

// transportFunc allows us to inject a mock transport for testing. We define it
// here so we can detect the tlsconfig and return nil for only this type.
type transportFunc func(*http.Request) (*http.Response, error)

func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return tf(req)
}

// resolveTLSConfig attempts to resolve the TLS configuration from the
// RoundTripper.
func resolveTLSConfig(transport http.RoundTripper) *tls.Config {

@ -24,7 +24,7 @@ func getDockerOS(serverHeader string) string {
func getFiltersQuery(f filters.Args) (url.Values, error) {
	query := url.Values{}
	if f.Len() > 0 {
		filterJSON, err := filters.ToParam(f)
		filterJSON, err := filters.ToJSON(f)
		if err != nil {
			return query, err
		}
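Several hunks above swap the deprecated `filters.ToParam` for `filters.ToJSON`; both serialize a filter set into the JSON string placed in the request query. A small sketch (not part of the vendored diff) of the call the list endpoints now make, using a placeholder filter:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/api/types/filters"
    )

    func main() {
    	args := filters.NewArgs()
    	args.Add("label", "foo=bar")
    	// ToJSON returns the JSON encoding used for the "filters" query parameter.
    	s, err := filters.ToJSON(args)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(s)
    }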
@ -4,7 +4,7 @@ import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"path"

	"github.com/docker/docker/api/types"
	"golang.org/x/net/context"

@ -18,13 +18,17 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Vo

// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
	// The empty ID needs to be handled here because with an empty ID the
	// request url will not contain a trailing / which calls the volume list API
	// instead of volume inspect
	if volumeID == "" {
		return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID}
	}

	var volume types.Volume
	resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
	resp, err := cli.get(ctx, path.Join("/volumes", volumeID), nil, nil)
	if err != nil {
		if resp.statusCode == http.StatusNotFound {
			return volume, nil, volumeNotFoundError{volumeID}
		}
		return volume, nil, err
		return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
	}
	defer ensureReaderClosed(resp)
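The comment in the hunk above explains the new empty-ID guard: `path.Join` drops the trailing slash that the old string concatenation produced, so an empty volume ID would otherwise hit the list endpoint. A tiny sketch (not part of the vendored diff) of the difference:

    package main

    import (
    	"fmt"
    	"path"
    )

    func main() {
    	fmt.Println("/volumes/" + "abc")          // /volumes/abc
    	fmt.Println(path.Join("/volumes", "abc")) // /volumes/abc
    	fmt.Println("/volumes/" + "")             // /volumes/  (old behaviour, trailing slash)
    	fmt.Println(path.Join("/volumes", ""))    // /volumes   (would resolve to the list endpoint)
    }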
@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool
	}
	resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
	ensureReaderClosed(resp)
	return err
	return wrapResponseError(err, resp, "volume", volumeID)
}
@ -0,0 +1,60 @@
## About

This directory contains a collection of scripts used to build and manage this
repository. If there are any issues regarding the intention of a particular
script (or even part of a certain script), please reach out to us.
It may help us either refine our current scripts or add new ones
that are appropriate for a given use case.

## DinD (dind.sh)

DinD is a wrapper script which allows Docker to be run inside a Docker
container. DinD requires the container to
be run with privileged mode enabled.

## Generate Authors (generate-authors.sh)

Generates AUTHORS, a file with all the names and corresponding emails of
individual contributors. AUTHORS can be found in the home directory of
this repository.

## Make

There are two make files, each with different extensions. Neither is supposed
to be called directly; only invoke `make`. Both scripts run inside a Docker
container.

### make.ps1

- The Windows native build script that uses PowerShell semantics; it is more limited
than `hack\make.sh` since it does not provide support for the full set of
operations provided by the Linux counterpart, `make.sh`. However, `make.ps1`
does provide support for local Windows development and Windows to Windows CI.
More information can be found within `make.ps1`, by the author, @jhowardmsft.

### make.sh

- Referenced via `make test` when running tests on a local machine,
or directly referenced when running tests inside a Docker development container.
- When running on a local machine, run `make test` to run all tests found in
`test`, `test-unit`, `test-integration`, and `test-docker-py`.
The default timeout is set in `make.sh` to 60 minutes
(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
all of the tests.
- When running inside a Docker development container, `hack/make.sh` does
not have a single target that runs all the tests. You need to provide a
single command line with multiple targets that performs the same thing.
An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration test-docker-py`
- For more information related to testing outside the scope of this README,
refer to
[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)

## Release (release.sh)

Releases any bundles built by `make` to a public AWS S3 bucket.
For information regarding configuration, please view `release.sh`.

## Vendor (vendor.sh)

A shell script that is a wrapper around Vndr. For information on how to use
this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)
69 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md generated vendored Normal file
@ -0,0 +1,69 @@
# Integration Testing on Swarm

IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster

## Architecture

### Master service

- Works as a funker caller
- Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)

### Worker service

- Works as a funker callee
- Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)

### Client

- Controls master and workers via `docker stack`
- No need to have a local daemon

Typically, the master and workers are supposed to be running on a cloud environment,
while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.

## Requirements

- Docker daemon 1.13 or later
- Private registry for distributed execution with multiple nodes

## Usage

### Step 1: Prepare images

    $ make build-integration-cli-on-swarm

The following environment variables are known to work in this step:

- `BUILDFLAGS`
- `DOCKER_INCREMENTAL_BINARY`

Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.

### Step 2: Execute tests

    $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest

The following environment variables are known to work in this step:

- `DOCKER_GRAPHDRIVER`
- `DOCKER_EXPERIMENTAL`

#### Flags

Basic flags:

- `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism.
- `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
- `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only a single node, and hence do not need a private registry, you do not need to specify `-push-worker-image`.

Experimental flags for mitigating makespan nonuniformity:

- `-shuffle`: Shuffle the test filter strings

Flags for debugging IT on Swarm itself:

- `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
- `-filters-file FILE`: the file that contains `-check.f` strings. By default, the file is automatically generated.
- `-dry-run`: skip the actual workload
- `-keep-executor`: do not auto-remove executor containers, which is used for running privileged programs on Swarm
2 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf generated vendored Normal file

@ -0,0 +1,2 @@
# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
0 vendor/github.com/moby/moby/opts/ip.go → vendor/github.com/docker/docker/opts/ip.go generated vendored
@ -0,0 +1,97 @@
// +build ignore

// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main

import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"

	"github.com/docker/docker/pkg/archive"
	"github.com/sirupsen/logrus"
)

var (
	flDebug  = flag.Bool("D", false, "debugging output")
	flNewDir = flag.String("newdir", "", "")
	flOldDir = flag.String("olddir", "", "")
	log      = logrus.New()
)

func main() {
	flag.Usage = func() {
		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()
	log.Out = os.Stderr
	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	var newDir, oldDir string

	if len(*flNewDir) == 0 {
		var err error
		newDir, err = ioutil.TempDir("", "docker-test-newDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(newDir)
		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
			log.Fatal(err)
		}
	} else {
		newDir = *flNewDir
	}

	if len(*flOldDir) == 0 {
		oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(oldDir)
	} else {
		oldDir = *flOldDir
	}

	changes, err := archive.ChangesDirs(newDir, oldDir)
	if err != nil {
		log.Fatal(err)
	}

	a, err := archive.ExportChanges(newDir, changes)
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	i, err := io.Copy(os.Stdout, a)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}

func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
	fileData := []byte("fooo")
	for n := 0; n < numberOfFiles; n++ {
		fileName := fmt.Sprintf("file-%d", n)
		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
			return 0, err
		}
		if makeLinks {
			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
				return 0, err
			}
		}
	}
	totalSize := numberOfFiles * len(fileData)
	return totalSize, nil
}
@ -3,8 +3,9 @@
package mount

import (
	"golang.org/x/sys/unix"
	"unsafe"

	"golang.org/x/sys/unix"
)

// #include <stdlib.h>

@ -4,16 +4,23 @@ package mount

/*
#include <stdio.h>
#include <stdlib.h>
#include <sys/mnttab.h>
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func parseMountTable() ([]*Info, error) {
	mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
	path := C.CString(C.MNTTAB)
	defer C.free(unsafe.Pointer(path))
	mode := C.CString("r")
	defer C.free(unsafe.Pointer(mode))

	mnttab := C.fopen(path, mode)
	if mnttab == nil {
		return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
	}
@ -1,85 +0,0 @@
package system

// This file implements syscalls for Win32 events which are not implemented
// in golang.

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	procCreateEvent = modkernel32.NewProc("CreateEventW")
	procOpenEvent   = modkernel32.NewProc("OpenEventW")
	procSetEvent    = modkernel32.NewProc("SetEvent")
	procResetEvent  = modkernel32.NewProc("ResetEvent")
	procPulseEvent  = modkernel32.NewProc("PulseEvent")
)

// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
func CreateEvent(eventAttributes *windows.SecurityAttributes, manualReset bool, initialState bool, name string) (handle windows.Handle, err error) {
	namep, _ := windows.UTF16PtrFromString(name)
	var _p1 uint32
	if manualReset {
		_p1 = 1
	}
	var _p2 uint32
	if initialState {
		_p2 = 1
	}
	r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
	use(unsafe.Pointer(namep))
	handle = windows.Handle(r0)
	if handle == windows.InvalidHandle {
		err = e1
	}
	return
}

// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle windows.Handle, err error) {
	namep, _ := windows.UTF16PtrFromString(name)
	var _p1 uint32
	if inheritHandle {
		_p1 = 1
	}
	r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
	use(unsafe.Pointer(namep))
	handle = windows.Handle(r0)
	if handle == windows.InvalidHandle {
		err = e1
	}
	return
}

// SetEvent implements win32 SetEvent func in golang.
func SetEvent(handle windows.Handle) (err error) {
	return setResetPulse(handle, procSetEvent)
}

// ResetEvent implements win32 ResetEvent func in golang.
func ResetEvent(handle windows.Handle) (err error) {
	return setResetPulse(handle, procResetEvent)
}

// PulseEvent implements win32 PulseEvent func in golang.
func PulseEvent(handle windows.Handle) (err error) {
	return setResetPulse(handle, procPulseEvent)
}

func setResetPulse(handle windows.Handle, proc *windows.LazyProc) (err error) {
	r0, _, _ := proc.Call(uintptr(handle))
	if r0 != 0 {
		err = syscall.Errno(r0)
	}
	return
}

var temp unsafe.Pointer

// use ensures a variable is kept alive without the GC freeing while still needed
func use(p unsafe.Pointer) {
	temp = p
}