Merge pull request #3919 from rhatdan/VENDOR
Update vendor of containers/(common,storage,image)
commit fe3d750724

go.mod: 8 lines changed
@@ -1,14 +1,14 @@
module github.com/containers/buildah
go 1.13
go 1.16
require (
github.com/containerd/containerd v1.6.2
github.com/containernetworking/cni v1.0.1
github.com/containers/common v0.47.5-0.20220406101255-3dd66c046c25
github.com/containers/image/v5 v5.21.1-0.20220405081457-d1b64686e1d0
github.com/containers/common v0.47.5-0.20220420095823-d822f53650b2
github.com/containers/image/v5 v5.21.1-0.20220414071450-d2d961d5d324
github.com/containers/ocicrypt v1.1.3
github.com/containers/storage v1.39.1-0.20220412073713-ea4008e14877
github.com/containers/storage v1.39.1-0.20220419114238-1be409aec551
github.com/docker/distribution v2.8.1+incompatible
github.com/docker/docker v20.10.14+incompatible
github.com/docker/go-units v0.4.0
go.sum: 24 lines changed

@@ -110,6 +110,7 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=

@@ -280,8 +281,9 @@ github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+EL
github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac=
github.com/containerd/stargz-snapshotter/estargz v0.11.1/go.mod h1:6VoPcf4M1wvnogWxqc4TqBWWErCS+R+ucnPZId2VbpQ=
github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk=
github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=

@@ -307,11 +309,11 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
github.com/containers/common v0.47.5-0.20220406101255-3dd66c046c25 h1:IQeqv8Hf6CqFUlKaz95QFTrLc9V4sbVQyhP9jzGnNBc=
github.com/containers/common v0.47.5-0.20220406101255-3dd66c046c25/go.mod h1:0mfWn1RRdpBjXmiunOVLaJ1I86pQjXKAc8zuiAuUesk=
github.com/containers/common v0.47.5-0.20220420095823-d822f53650b2 h1:J5uPUMXvYxGBCthUVSYChh1lGMH/XgsZLeJAZCs+zgo=
github.com/containers/common v0.47.5-0.20220420095823-d822f53650b2/go.mod h1:BBq6jdyjXvJh69YzQPvIuZjBho0MRdA0XGaqBnsO+1Y=
github.com/containers/image/v5 v5.19.2-0.20220224100137-1045fb70b094/go.mod h1:XoYK6kE0dpazFNcuS+a8lra+QfbC6s8tzv+cUuCrZpE=
github.com/containers/image/v5 v5.21.1-0.20220405081457-d1b64686e1d0 h1:Md1CckW9KSYkdtMdKG70Fc+YqCCVgT+HAr7NS9Ilf8E=
github.com/containers/image/v5 v5.21.1-0.20220405081457-d1b64686e1d0/go.mod h1:JhGkIpC7vKBpLc6mTBE4S8cZUAD+8HgicsxYaLv6BsQ=
github.com/containers/image/v5 v5.21.1-0.20220414071450-d2d961d5d324 h1:AJOJpnXm0wfyKr113QMTCfjvnZ17IIDxvqMpKofuvZw=
github.com/containers/image/v5 v5.21.1-0.20220414071450-d2d961d5d324/go.mod h1:VM69F9d4EU1B9FXvpHH0nrgj0Vc6NMPI39SojiYjw1o=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=

@@ -324,8 +326,8 @@ github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c
github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
github.com/containers/storage v1.38.3-0.20220301151551-d06b0f81c0aa/go.mod h1:LkkL34WRi4dI4jt9Cp+ImdZi/P5i36glSHimT5CP5zM=
github.com/containers/storage v1.39.0/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs=
github.com/containers/storage v1.39.1-0.20220412073713-ea4008e14877 h1:V3aVdbQt9qU6tu4HHAJtro4H8+Hnv6X/hrUNba8dll0=
github.com/containers/storage v1.39.1-0.20220412073713-ea4008e14877/go.mod h1:UuYvGSKIdmzkjHbT/PENtxLRVGQ974nyhMbYp0KP19w=
github.com/containers/storage v1.39.1-0.20220419114238-1be409aec551 h1:Rp2vxLkv0nwaK77MGntkH4E/db50mJ0aWCPpwx7g5EE=
github.com/containers/storage v1.39.1-0.20220419114238-1be409aec551/go.mod h1:hFiHLMgNU0r3MiUpE97hEBaEKCN8fEIuEEBXoFC9eN0=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=

@@ -723,7 +725,6 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
github.com/magefile/mage v1.12.1/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magefile/mage v1.13.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=

@@ -785,8 +786,9 @@ github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7s
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.1 h1:+H/KnGEAGRpTrEAqNVQ2AM3SiwMgJUt/TXj+Z8cmCIc=
github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=

@@ -1021,8 +1023,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo=
github.com/sylabs/sif/v2 v2.3.2/go.mod h1:IrLX2pzmQ2O4qgv5iy3HdKJcBNYds9DTMd9Je8A9tX4=
github.com/sylabs/sif/v2 v2.4.2 h1:L4jcqeOF33JfSnH+8GJKC7/ooVpzpZ2K7wotGG4ZzqQ=
github.com/sylabs/sif/v2 v2.4.2/go.mod h1:6gQvzNKRIqr4FS08XBfHpkpnxv9b7h58GLkSJ1zdK9A=
github.com/sylabs/sif/v2 v2.5.0 h1:VGsJG0eIkYGLjYe7Wic6mYQGUe1oeJUlw0sslaFmmPs=
github.com/sylabs/sif/v2 v2.5.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -26,10 +26,10 @@ import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"runtime"

@@ -48,6 +48,7 @@ type options struct {
prioritizedFiles []string
missedPrioritizedFiles *[]string
compression Compression
ctx context.Context
}

type Option func(o *options) error

@@ -104,6 +105,14 @@ func WithCompression(compression Compression) Option {
}
}

// WithContext specifies a context that can be used for clean canceleration.
func WithContext(ctx context.Context) Option {
return func(o *options) error {
o.ctx = ctx
return nil
}
}

// Blob is an eStargz blob.
type Blob struct {
io.ReadCloser

@@ -139,12 +148,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
}
layerFiles := newTempFiles()
ctx := opts.ctx
if ctx == nil {
ctx = context.Background()
}
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-done:
// nop
case <-ctx.Done():
layerFiles.CleanupAll()
}
}()
defer func() {
if rErr != nil {
if err := layerFiles.CleanupAll(); err != nil {
rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
}
}
if cErr := ctx.Err(); cErr != nil {
rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
}
}()
tarBlob, err := decompressBlob(tarBlob, layerFiles)
if err != nil {

@@ -506,12 +532,13 @@ func newTempFiles() *tempFiles {
}

type tempFiles struct {
files []*os.File
filesMu sync.Mutex
files []*os.File
filesMu sync.Mutex
cleanupOnce sync.Once
}

func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
f, err := ioutil.TempFile(dir, pattern)
f, err := os.CreateTemp(dir, pattern)
if err != nil {
return nil, err
}

@@ -521,7 +548,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
return f, nil
}

func (tf *tempFiles) CleanupAll() error {
func (tf *tempFiles) CleanupAll() (err error) {
tf.cleanupOnce.Do(func() {
err = tf.cleanupAll()
})
return
}

func (tf *tempFiles) cleanupAll() error {
tf.filesMu.Lock()
defer tf.filesMu.Unlock()
var allErr []error
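
Note: the WithContext option and the cancellation goroutine added to Build above let a caller abort an eStargz conversion and still have the temporary layer files removed via CleanupAll. A minimal usage sketch, assuming an uncompressed tar file and a 30-second timeout (both assumptions, not part of the diff):

package main

import (
	"context"
	"io"
	"os"
	"time"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// convertWithTimeout builds an eStargz blob from a tar file, cancelling
// (and cleaning up temporary files) if it takes longer than 30 seconds.
func convertWithTimeout(tarPath string) (*estargz.Blob, error) {
	f, err := os.Open(tarPath)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// When ctx is cancelled, the goroutine added in Build calls
	// layerFiles.CleanupAll(), so partially written temp files are removed.
	sr := io.NewSectionReader(f, 0, fi.Size())
	return estargz.Build(sr, estargz.WithContext(ctx))
}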
@@ -31,7 +31,6 @@ import (
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"path"
"sort"

@@ -579,7 +578,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
}
defer dr.Close()
if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil {
if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
}
return io.ReadFull(dr, p)

@@ -933,7 +932,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
}
}
}
remainDest := ioutil.Discard
remainDest := io.Discard
if lossless {
remainDest = dst // Preserve the remaining bytes in lossless mode
}
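
Note: many of the hunks in this vendor bump are the same mechanical migration. With the module now requiring Go 1.16 (see go.mod above), the deprecated io/ioutil helpers are replaced by their io and os equivalents. A summary of the mappings that recur throughout the diff, for reference:

// io/ioutil helper           -> Go 1.16+ replacement used in this diff
// ioutil.Discard             -> io.Discard
// ioutil.NopCloser(r)        -> io.NopCloser(r)
// ioutil.ReadAll(r)          -> io.ReadAll(r)
// ioutil.ReadFile(path)      -> os.ReadFile(path)
// ioutil.WriteFile(p, b, m)  -> os.WriteFile(p, b, m)
// ioutil.TempFile(dir, pat)  -> os.CreateTemp(dir, pat)
// ioutil.TempDir(dir, pat)   -> os.MkdirTemp(dir, pat)
// ioutil.ReadDir(path)       -> os.ReadDir(path) // returns []fs.DirEntry instead of []fs.FileInfo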
@@ -31,7 +31,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"sort"

@@ -287,11 +286,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
return false
}
aFile, err := ioutil.ReadAll(aTar)
aFile, err := io.ReadAll(aTar)
if err != nil {
t.Fatal("failed to read tar payload of A")
}
bFile, err := ioutil.ReadAll(bTar)
bFile, err := io.ReadAll(bTar)
if err != nil {
t.Fatal("failed to read tar payload of B")
}
@@ -147,15 +147,13 @@ type copier struct {
destinationLookup LookupReferenceFunc
}

var (
// storageAllowedPolicyScopes overrides the policy for local storage
// to ensure that we can read images from it.
storageAllowedPolicyScopes = signature.PolicyTransportScopes{
"": []signature.PolicyRequirement{
signature.NewPRInsecureAcceptAnything(),
},
}
)
// storageAllowedPolicyScopes overrides the policy for local storage
// to ensure that we can read images from it.
var storageAllowedPolicyScopes = signature.PolicyTransportScopes{
"": []signature.PolicyRequirement{
signature.NewPRInsecureAcceptAnything(),
},
}

// getDockerAuthConfig extracts a docker auth config from the CopyOptions. Returns
// nil if no credentials are set.
@@ -95,9 +95,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
// For now: we only support key=value
// We will attempt to strip quotation marks if present.

var (
key, val string
)
var key, val string

splitEnv := strings.SplitN(value, "=", 2)
key = splitEnv[0]
@@ -213,7 +213,6 @@ func (i *Image) inspectInfo(ctx context.Context) (*types.ImageInspectInfo, error
ref, err := i.StorageReference()
if err != nil {
return nil, err
}
@@ -4,12 +4,10 @@ import (
"github.com/containers/image/v5/signature"
)

var (
// storageAllowedPolicyScopes overrides the policy for local storage
// to ensure that we can read images from it.
storageAllowedPolicyScopes = signature.PolicyTransportScopes{
"": []signature.PolicyRequirement{
signature.NewPRInsecureAcceptAnything(),
},
}
)
// storageAllowedPolicyScopes overrides the policy for local storage
// to ensure that we can read images from it.
var storageAllowedPolicyScopes = signature.PolicyTransportScopes{
"": []signature.PolicyRequirement{
signature.NewPRInsecureAcceptAnything(),
},
}
@@ -384,10 +384,8 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
}
instanceInfo.instanceDigest = &manifestDigest
instanceInfo.Size = int64(len(manifestBytes))
} else {
if manifestDigest == "" {
manifestDigest = *instanceInfo.instanceDigest
}
} else if manifestDigest == "" {
manifestDigest = *instanceInfo.instanceDigest
}
err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations)
if err != nil {

@@ -405,9 +403,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
func (l *list) Remove(instanceDigest digest.Digest) error {
err := l.List.Remove(instanceDigest)
if err == nil {
if _, needToDelete := l.instances[instanceDigest]; needToDelete {
delete(l.instances, instanceDigest)
}
delete(l.instances, instanceDigest)
}
return err
}
@@ -74,7 +74,7 @@ func (r *Runtime) SystemContext() *types.SystemContext {
// Returns a copy of the runtime's system context.
func (r *Runtime) systemContextCopy() *types.SystemContext {
var sys types.SystemContext
deepcopy.Copy(&sys, &r.systemContext)
_ = deepcopy.Copy(&sys, &r.systemContext)
return &sys
}
@@ -68,7 +68,6 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string,
}

return errors.Errorf("unsupported format %q for saving images", format)
}

// saveSingleImage saves the specified image name to the specified path.
@@ -11,7 +11,6 @@ import (
"path/filepath"
"strconv"
"strings"
"syscall"
"time"

"github.com/containernetworking/cni/libcni"

@@ -21,6 +20,7 @@ import (
pkgutil "github.com/containers/common/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)

func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath string) (*types.Network, error) {

@@ -45,12 +45,11 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
}
}

f, err := os.Stat(confPath)
t, err := fileTime(confPath)
if err != nil {
return nil, err
}
stat := f.Sys().(*syscall.Stat_t)
network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec))
network.Created = t

firstPlugin := conf.Plugins[0]
network.Driver = firstPlugin.Network.Type

@@ -316,16 +315,15 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
cniPathName := ""
if writeToDisk {
cniPathName = filepath.Join(n.cniConfigDir, network.Name+".conflist")
err = ioutil.WriteFile(cniPathName, b, 0644)
err = ioutil.WriteFile(cniPathName, b, 0o644)
if err != nil {
return nil, "", err
}
f, err := os.Stat(cniPathName)
t, err := fileTime(cniPathName)
if err != nil {
return nil, "", err
}
stat := f.Sys().(*syscall.Stat_t)
network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec))
network.Created = t
} else {
network.Created = time.Now()
}

@@ -424,3 +422,17 @@ func parseOptions(networkOptions map[string]string, networkDriver string) (*opti
}
return opt, nil
}

func fileTime(file string) (time.Time, error) {
var st unix.Stat_t
for {
err := unix.Stat(file, &st)
if err == nil {
break
}
if err != unix.EINTR { //nolint:errorlint // unix errors are bare
return time.Time{}, &os.PathError{Path: file, Op: "stat", Err: err}
}
}
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec)), nil //nolint:unconvert // On some platforms Sec and Nsec are int32.
}
@@ -17,7 +17,6 @@ import (

// NetworkCreate will take a partial filled Network and fill the
// missing fields. It creates the Network and returns the full Network.
// nolint:gocritic
func (n *cniNetwork) NetworkCreate(net types.Network) (types.Network, error) {
n.lock.Lock()
defer n.lock.Unlock()
@@ -109,7 +109,6 @@ func GetFreeIPv4NetworkSubnet(usedNetworks []*net.IPNet, subnetPools []config.Su
return nil, err
}
return nil, errors.New("could not find free subnet from subnet pools")
}

// GetFreeIPv6NetworkSubnet returns a unused ipv6 subnet
@@ -19,7 +19,6 @@ import (

// NetworkCreate will take a partial filled Network and fill the
// missing fields. It creates the Network and returns the full Network.
// nolint:gocritic
func (n *netavarkNetwork) NetworkCreate(net types.Network) (types.Network, error) {
n.lock.Lock()
defer n.lock.Unlock()
@@ -59,9 +59,7 @@ func newIPAMError(cause error, msg string, args ...interface{}) *ipamError {
// openDB will open the ipam database
// Note that the caller has to Close it.
func (n *netavarkNetwork) openDB() (*bbolt.DB, error) {
// linter complains about the octal value
// nolint:gocritic
db, err := bbolt.Open(n.ipamDBPath, 0600, nil)
db, err := bbolt.Open(n.ipamDBPath, 0o600, nil)
if err != nil {
return nil, newIPAMError(err, "failed to open database %s", n.ipamDBPath)
}
@@ -108,11 +108,11 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
return nil, errors.Wrap(err, "failed to parse default subnet")
}

if err := os.MkdirAll(conf.NetworkConfigDir, 0755); err != nil {
if err := os.MkdirAll(conf.NetworkConfigDir, 0o755); err != nil {
return nil, err
}

if err := os.MkdirAll(conf.NetworkRunDir, 0755); err != nil {
if err := os.MkdirAll(conf.NetworkRunDir, 0o755); err != nil {
return nil, err
}
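
Note: another recurring mechanical change in this bump, visible in the two MkdirAll calls above and in many later hunks, is the file-mode literal style: 0755 becomes 0o755. Both spell the same value; the 0o prefix (legal since Go 1.13) is the explicit octal form that linters such as gocritic's octalLiteral check prefer, which is why the nolint comments can be dropped. A small sketch:

package main

import "fmt"

func main() {
	// 0755 and 0o755 are the same untyped constant; only the spelling differs.
	fmt.Println(0755 == 0o755) // true
	fmt.Printf("%o\n", 0o644)  // 644
}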
@@ -121,8 +121,7 @@ func defaultNetworkBackend(store storage.Store, conf *config.Config) (backend ty
defer func() {
// only write when there is no error
if err == nil {
// nolint:gocritic
if err := ioutils.AtomicWriteFile(file, []byte(backend), 0644); err != nil {
if err := ioutils.AtomicWriteFile(file, []byte(backend), 0o644); err != nil {
logrus.Errorf("could not write network backend to file: %v", err)
}
}
@@ -233,7 +233,6 @@ func parseAAParserVersion(output string) (int, error) {
// major*10^5 + minor*10^3 + patch*10^0
numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel
return numericVersion, nil
}

// CheckProfileAndLoadDefault checks if the specified profile is loaded and
@@ -41,7 +41,6 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error {
return nil
})

if err != nil {
return errors.Wrap(err, "failed to chown recursively host path")
}
@@ -252,7 +252,7 @@ type EngineConfig struct {
// EventsLogFileMaxSize sets the maximum size for the events log. When the limit is exceeded,
// the logfile is rotated and the old one is deleted.
EventsLogFileMaxSize uint64 `toml:"events_logfile_max_size,omitempty,omitzero"`
EventsLogFileMaxSize eventsLogMaxSize `toml:"events_logfile_max_size,omitzero"`

// EventsLogger determines where events should be logged.
EventsLogger string `toml:"events_logger,omitempty"`

@@ -558,8 +558,10 @@ type MachineConfig struct {
Image string `toml:"image,omitempty"`
// Memory in MB a machine is created with.
Memory uint64 `toml:"memory,omitempty,omitzero"`
// Username to use for rootless podman when init-ing a podman machine VM
// User to use for rootless podman when init-ing a podman machine VM
User string `toml:"user,omitempty"`
// Volumes are host directories mounted into the VM by default.
Volumes []string `toml:"volumes"`
}

// Destination represents destination for remote service

@@ -579,7 +581,6 @@ type Destination struct {
// with cgroupv2v2. Other OCI runtimes are not yet supporting cgroupv2v2. This
// might change in the future.
func NewConfig(userConfigPath string) (*Config, error) {
// Generate the default config for the system
config, err := DefaultConfig()
if err != nil {

@@ -763,7 +764,6 @@ func (c *Config) addCAPPrefix() {

// Validate is the main entry point for library configuration validation.
func (c *Config) Validate() error {
if err := c.Containers.Validate(); err != nil {
return errors.Wrap(err, "validating containers config")
}

@@ -820,7 +820,6 @@ func (c *EngineConfig) Validate() error {
// It returns an `error` on validation failure, otherwise
// `nil`.
func (c *ContainersConfig) Validate() error {
if err := c.validateUlimits(); err != nil {
return err
}

@@ -952,7 +951,6 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string {
// Capabilities returns the capabilities parses the Add and Drop capability
// list from the default capabiltiies for the container
func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) {
userNotRoot := func(user string) bool {
if user == "" || user == "root" || user == "0" {
return false

@@ -1012,7 +1010,7 @@ func Device(device string) (src, dst, permissions string, err error) {
// IsValidDeviceMode checks if the mode for device is valid or not.
// IsValid mode is a composition of r (read), w (write), and m (mknod).
func IsValidDeviceMode(mode string) bool {
var legalDeviceMode = map[rune]bool{
legalDeviceMode := map[rune]bool{
'r': true,
'w': true,
'm': true,

@@ -1063,7 +1061,6 @@ func rootlessConfigPath() (string, error) {
}

func stringsEq(a, b []string) bool {
if len(a) != len(b) {
return false
}

@@ -1148,10 +1145,10 @@ func (c *Config) Write() error {
if err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
return err
}
configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
if err != nil {
return err
}

@@ -1264,3 +1261,33 @@ func (c *Config) setupEnv() error {
}
return nil
}

// eventsLogMaxSize is the type used by EventsLogFileMaxSize
type eventsLogMaxSize uint64

// UnmarshalText parses the JSON encoding of eventsLogMaxSize and
// stores it in a value.
func (e *eventsLogMaxSize) UnmarshalText(text []byte) error {
// REMOVE once writing works
if string(text) == "" {
return nil
}
val, err := units.FromHumanSize((string(text)))
if err != nil {
return err
}
if val < 0 {
return fmt.Errorf("events log file max size cannot be negative: %s", string(text))
}
*e = eventsLogMaxSize(uint64(val))
return nil
}

// MarshalText returns the JSON encoding of eventsLogMaxSize.
func (e eventsLogMaxSize) MarshalText() ([]byte, error) {
if uint64(e) == DefaultEventsLogSizeMax || e == 0 {
v := []byte{}
return v, nil
}
return []byte(fmt.Sprintf("%d", e)), nil
}
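
Note: the new eventsLogMaxSize type above parses human-readable sizes with units.FromHumanSize from github.com/docker/go-units, which is already a direct dependency in go.mod. A small sketch of what that call accepts, matching the containers.conf documentation in the next hunk:

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	for _, s := range []string{"1b", "1m", "3g", "1000000"} {
		n, err := units.FromHumanSize(s)
		// FromHumanSize is decimal (SI): "1m" -> 1000000, not 1048576.
		fmt.Println(s, n, err)
	}
}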
@@ -373,11 +373,14 @@ default_sysctls = [
# Define where event logs will be stored, when events_logger is "file".
#events_logfile_path=""

# Sets the maximum size for events_logfile_path in bytes. When the limit is exceeded,
# the logfile will be rotated and the old one will be deleted.
# Sets the maximum size for events_logfile_path.
# The size can be b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
# The format for the size is `<number><unit>`, e.g., `1b` or `3g`.
# If no unit is included then the size will be read in bytes.
# When the limit is exceeded, the logfile will be rotated and the old one will be deleted.
# If the maximum size is set to 0, then no limit will be applied,
# and the logfile will not be rotated.
#events_logfile_max_size = 0
#events_logfile_max_size = "1m"

# Selects which logging mechanism to use for container engine events.
# Valid values are `journald`, `file` and `none`.

@@ -627,8 +630,18 @@ default_sysctls = [
#
#user = "core"

# Host directories to be mounted as volumes into the VM by default.
# Environment variables like $HOME as well as complete paths are supported for
# the source and destination. An optional third field `:ro` can be used to
# tell the container engines to mount the volume readonly.
#
# volumes = [
#  "$HOME:$HOME",
#]

# The [machine] table MUST be the last entry in this file.
# (Unless another table is added)
# TOML does not provide a way to end a table other than a further table being
# defined, so every key hereafter will be part of [machine] and not the
# main config.
@@ -9,6 +9,7 @@ import (
"path/filepath"
"regexp"
"strconv"
"strings"

nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/apparmor"

@@ -108,7 +109,6 @@ func parseSubnetPool(subnet string, size int) SubnetPool {
Base: &nettypes.IPNet{IPNet: *n},
Size: size,
}
}

const (

@@ -127,6 +127,9 @@ const (
// DefaultLogSizeMax is the default value for the maximum log size
// allowed for a container. Negative values mean that no limit is imposed.
DefaultLogSizeMax = -1
// DefaultEventsLogSize is the default value for the maximum events log size
// before rotation.
DefaultEventsLogSizeMax = uint64(1000000)
// DefaultPidsLimit is the default value for maximum number of processes
// allowed inside a container
DefaultPidsLimit = 2048

@@ -155,7 +158,6 @@ const (

// DefaultConfig defines the default values from containers.conf
func DefaultConfig() (*Config, error) {
defaultEngineConfig, err := defaultConfigFromMemory()
if err != nil {
return nil, err

@@ -246,6 +248,7 @@ func defaultMachineConfig() MachineConfig {
Image: getDefaultMachineImage(),
Memory: 2048,
User: getDefaultMachineUser(),
Volumes: []string{"$HOME:$HOME"},
}
}

@@ -261,6 +264,8 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log")

c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax)

c.CompatAPIEnforceDockerHub = true

if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {

@@ -397,10 +402,10 @@ func defaultTmpDir() (string, error) {
}
libpodRuntimeDir := filepath.Join(runtimeDir, "libpod")

if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
if err := os.Mkdir(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil {
if !os.IsExist(err) {
return "", err
} else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
} else if err := os.Chmod(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil {
// The directory already exist, just set the sticky bit
return "", errors.Wrap(err, "set sticky bit on")
}

@@ -464,6 +469,10 @@ func (c *Config) NetNS() string {
return c.Containers.NetNS
}

func (c EngineConfig) EventsLogMaxSize() uint64 {
return uint64(c.EventsLogFileMaxSize)
}

// SecurityOptions returns the default security options
func (c *Config) SecurityOptions() []string {
securityOpts := []string{}

@@ -593,3 +602,24 @@ func (c *Config) LogDriver() string {
func (c *Config) MachineEnabled() bool {
return c.Engine.MachineEnabled
}

// MachineVolumes returns volumes to mount into the VM
func (c *Config) MachineVolumes() ([]string, error) {
return machineVolumes(c.Machine.Volumes)
}

func machineVolumes(volumes []string) ([]string, error) {
translatedVolumes := []string{}
for _, v := range volumes {
vol := os.ExpandEnv(v)
split := strings.Split(vol, ":")
if len(split) < 2 || len(split) > 3 {
return nil, errors.Errorf("invalid machine volume %s, 2 or 3 fields required", v)
}
if split[0] == "" || split[1] == "" {
return nil, errors.Errorf("invalid machine volume %s, fields must container data", v)
}
translatedVolumes = append(translatedVolumes, vol)
}
return translatedVolumes, nil
}
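
Note: the new MachineVolumes helper above expands environment variables and validates the 2- or 3-field source:destination[:ro] format documented in the containers.conf hunk earlier. A hedged usage sketch; NewConfig and MachineVolumes are from the diff, everything else is illustrative:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	cfg, err := config.NewConfig("") // defaults plus any system/user containers.conf
	if err != nil {
		panic(err)
	}
	// With the default machine volume "$HOME:$HOME", this prints the entry
	// with $HOME expanded, e.g. "/home/user:/home/user".
	vols, err := cfg.MachineVolumes()
	fmt.Println(vols, err)
}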
@@ -58,7 +58,6 @@ func useSystemd() bool {
val := strings.TrimSuffix(string(dat), "\n")
usesSystemd = (val == "systemd")
}
return
})
return usesSystemd
}

@@ -82,7 +81,6 @@ func useJournald() bool {
}
}
}
return
})
return usesJournald
}
@@ -16,31 +16,22 @@ import (
type List interface {
AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, os, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error
Remove(instanceDigest digest.Digest) error

SetURLs(instanceDigest digest.Digest, urls []string) error
URLs(instanceDigest digest.Digest) ([]string, error)

SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error
Annotations(instanceDigest *digest.Digest) (map[string]string, error)

SetOS(instanceDigest digest.Digest, os string) error
OS(instanceDigest digest.Digest) (string, error)

SetArchitecture(instanceDigest digest.Digest, arch string) error
Architecture(instanceDigest digest.Digest) (string, error)

SetOSVersion(instanceDigest digest.Digest, osVersion string) error
OSVersion(instanceDigest digest.Digest) (string, error)

SetVariant(instanceDigest digest.Digest, variant string) error
Variant(instanceDigest digest.Digest) (string, error)

SetFeatures(instanceDigest digest.Digest, features []string) error
Features(instanceDigest digest.Digest) ([]string, error)

SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error
OSFeatures(instanceDigest digest.Digest) ([]string, error)

Serialize(mimeType string) ([]byte, error)
Instances() []digest.Digest
OCIv1() *v1.Index

@@ -81,7 +72,7 @@ func Create() List {

// AddInstance adds an entry for the specified manifest digest, with assorted
// additional information specified in parameters, to the list or index.
func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error {
func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error {
if err := l.Remove(manifestDigest); err != nil && !os.IsNotExist(errors.Cause(err)) {
return err
}

@@ -451,38 +442,37 @@ func (l *list) preferOCI() bool {
// Serialize encodes the list using the specified format, or by selecting one
// which it thinks is appropriate.
func (l *list) Serialize(mimeType string) ([]byte, error) {
var manifestBytes []byte
var (
res []byte
err error
)
switch mimeType {
case "":
if l.preferOCI() {
manifest, err := json.Marshal(&l.oci)
res, err = json.Marshal(&l.oci)
if err != nil {
return nil, errors.Wrapf(err, "error marshalling OCI image index")
}
manifestBytes = manifest
} else {
manifest, err := json.Marshal(&l.docker)
res, err = json.Marshal(&l.docker)
if err != nil {
return nil, errors.Wrapf(err, "error marshalling Docker manifest list")
}
manifestBytes = manifest
}
case v1.MediaTypeImageIndex:
manifest, err := json.Marshal(&l.oci)
res, err = json.Marshal(&l.oci)
if err != nil {
return nil, errors.Wrapf(err, "error marshalling OCI image index")
}
manifestBytes = manifest
case manifest.DockerV2ListMediaType:
manifest, err := json.Marshal(&l.docker)
res, err = json.Marshal(&l.docker)
if err != nil {
return nil, errors.Wrapf(err, "error marshalling Docker manifest list")
}
manifestBytes = manifest
default:
return nil, errors.Wrapf(ErrManifestTypeNotSupported, "serializing list to type %q not implemented", mimeType)
}
return manifestBytes, nil
return res, nil
}

// Instances returns the list of image instances mentioned in this list.
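
Note: in the Serialize rewrite above, the per-case `manifest, err := json.Marshal(...)` locals are replaced by assignments into a single `res` declared up front. Besides dropping the extra copies into manifestBytes, this avoids declaring a local named `manifest` that shadows the imported manifest package used by the same switch (manifest.DockerV2ListMediaType). A minimal illustration of the pattern being adopted, with hypothetical names:

package main

import "encoding/json"

type payload struct{ Name string }

func encode(kind string, p payload) ([]byte, error) {
	var res []byte
	var err error
	switch kind {
	case "json":
		// Assigning with "=" fills the outer res; writing "res, err := ..."
		// here would declare new case-scoped variables and leave the outer
		// res nil when the function returns.
		res, err = json.Marshal(&p)
		if err != nil {
			return nil, err
		}
	}
	return res, nil
}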
@@ -141,7 +141,7 @@ func Device(device string) (src, dest, permissions string, err error) {
// isValidDeviceMode checks if the mode for device is valid or not.
// isValid mode is a composition of r (read), w (write), and m (mknod).
func isValidDeviceMode(mode string) bool {
var legalDeviceMode = map[rune]bool{
legalDeviceMode := map[rune]bool{
'r': true,
'w': true,
'm': true,
@@ -112,7 +112,7 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error)
newConfig := &specs.LinuxSeccomp{}

var arch string
var native, err = libseccomp.GetNativeArch()
native, err := libseccomp.GetNativeArch()
if err == nil {
arch = native.String()
}
@@ -262,7 +262,6 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string
data, err := readFileOrDir("", hostDirOrFile, mode.Perm())
if err != nil {
return nil, err
}
for _, s := range data {
if err := os.MkdirAll(filepath.Dir(ctrDirOrFileOnHost), s.dirMode); err != nil {

@@ -313,7 +312,7 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint,
subscriptionsDir := "/run/secrets"
ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir)
if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) {
if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { //nolint
if err = idtools.MkdirAllAs(ctrDirOnHost, 0o755, uid, gid); err != nil { //nolint
return err
}
if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil {
@@ -34,13 +34,14 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)

if strings.Contains(value, ".") { // nolint:gocritic
switch {
case strings.Contains(value, "."):
if parseInLocation {
format = rFC3339NanoLocal
} else {
format = time.RFC3339Nano
}
} else if strings.Contains(value, "T") {
case strings.Contains(value, "T"):
// we want the number of colons in the T portion of the timestamp
tcolons := strings.Count(value, ":")
// if parseInLocation is off and we have a +/- zone offset (not Z) then

@@ -68,9 +69,9 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
format = time.RFC3339
}
}
} else if parseInLocation {
case parseInLocation:
format = dateLocal
} else {
default:
format = dateWithZone
}

@@ -112,7 +113,7 @@ func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error)
return parseTimestamp(value)
}

func parseTimestamp(value string) (int64, int64, error) { // nolint:gocritic
func parseTimestamp(value string) (int64, int64, error) {
sa := strings.SplitN(value, ".", 2)
s, err := strconv.ParseInt(sa[0], 10, 64)
if err != nil {
@@ -10,8 +10,8 @@ import (
)

func Check() {
oldUmask := syscall.Umask(0022) //nolint
if (oldUmask & ^0022) != 0 {
oldUmask := syscall.Umask(0o022) //nolint
if (oldUmask & ^0o022) != 0 {
logrus.Debugf("umask value too restrictive. Forcing it to 022")
}
}
@@ -23,7 +23,7 @@ var (
// isWriteableOnlyByOwner checks that the specified permission mask allows write
// access only to the owner.
func isWriteableOnlyByOwner(perm os.FileMode) bool {
return (perm & 0722) == 0700
return (perm & 0o722) == 0o700
}

// GetRuntimeDir returns the runtime directory

@@ -46,7 +46,7 @@ func GetRuntimeDir() (string, error) {
uid := fmt.Sprintf("%d", unshare.GetRootlessUID())
if runtimeDir == "" {
tmpDir := filepath.Join("/run", "user", uid)
if err := os.MkdirAll(tmpDir, 0700); err != nil {
if err := os.MkdirAll(tmpDir, 0o700); err != nil {
logrus.Debugf("unable to make temp dir: %v", err)
}
st, err := os.Stat(tmpDir)

@@ -56,7 +56,7 @@ func GetRuntimeDir() (string, error) {
}
if runtimeDir == "" {
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid))
if err := os.MkdirAll(tmpDir, 0700); err != nil {
if err := os.MkdirAll(tmpDir, 0o700); err != nil {
logrus.Debugf("unable to make temp dir %v", err)
}
st, err := os.Stat(tmpDir)
@ -5,7 +5,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
|
@ -199,7 +198,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
reportWriter := ioutil.Discard
|
||||
reportWriter := io.Discard
|
||||
|
||||
if options.ReportWriter != nil {
|
||||
reportWriter = options.ReportWriter
|
||||
|
|
@ -232,7 +231,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
|||
// createProgressBar() will print a single line instead.
|
||||
progressOutput := reportWriter
|
||||
if !isTTY(reportWriter) {
|
||||
progressOutput = ioutil.Discard
|
||||
progressOutput = io.Discard
|
||||
}
|
||||
|
||||
c := &copier{
|
||||
|
|
@ -1091,7 +1090,7 @@ func customPartialBlobDecorFunc(s decor.Statistics) string {
|
|||
}
|
||||
|
||||
// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
|
||||
// is ioutil.Discard, the progress bar's output will be discarded
|
||||
// is io.Discard, the progress bar's output will be discarded
|
||||
// NOTE: Every progress bar created within a progress pool must either successfully
|
||||
// complete or be aborted, or pool.Wait() will hang. That is typically done
|
||||
// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
|
||||
|
|
@ -1143,7 +1142,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
|
|||
),
|
||||
)
|
||||
}
|
||||
if c.progressOutput == ioutil.Discard {
|
||||
if c.progressOutput == io.Discard {
|
||||
c.Printf("Copying %s %s\n", kind, info.Digest)
|
||||
}
|
||||
return bar
|
||||
|
|
@ -1669,7 +1668,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
|||
// sent there if we are not already at EOF.
|
||||
if getOriginalLayerCopyWriter != nil {
|
||||
logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
|
||||
_, err := io.Copy(ioutil.Discard, originalLayerReader)
|
||||
_, err := io.Copy(io.Discard, originalLayerReader)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ package directory
|
|||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
|
@ -62,7 +61,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
|
|||
return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
|
||||
}
|
||||
if versionExists {
|
||||
contents, err := ioutil.ReadFile(d.ref.versionPath())
|
||||
contents, err := os.ReadFile(d.ref.versionPath())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -86,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
|
|||
}
|
||||
}
|
||||
// create version file
|
||||
err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
|
||||
err = os.WriteFile(d.ref.versionPath(), []byte(version), 0644)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
|
||||
}
|
||||
|
|
@ -149,7 +148,7 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool {
|
|||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
|
||||
blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
|
|
@ -232,7 +231,7 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
|
|||
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
|
||||
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
|
||||
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
|
||||
return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
|
||||
return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
|
||||
}
|
||||
|
||||
// PutSignatures writes a set of signatures to the destination.
|
||||
|
|
@ -240,7 +239,7 @@ func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte,
|
|||
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
|
||||
func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
|
||||
for i, sig := range signatures {
|
||||
if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
|
||||
if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
@ -272,7 +271,7 @@ func pathExists(path string) (bool, error) {
|
|||
|
||||
// returns true if directory is empty
|
||||
func isDirEmpty(path string) (bool, error) {
|
||||
files, err := ioutil.ReadDir(path)
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
|
@ -281,7 +280,7 @@ func isDirEmpty(path string) (bool, error) {
|
|||
|
||||
// deletes the contents of a directory
|
||||
func removeDirContents(path string) error {
|
||||
files, err := ioutil.ReadDir(path)
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ package directory
|
|||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
|
|
@ -37,7 +36,7 @@ func (s *dirImageSource) Close() error {
|
|||
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
|
||||
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
|
||||
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
|
||||
m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest))
|
||||
m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
|
@ -71,7 +70,7 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
|
|||
func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
|
||||
signatures := [][]byte{}
|
||||
for i := 0; ; i++ {
|
||||
signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest))
|
||||
signature, err := os.ReadFile(s.ref.signaturePath(i, instanceDigest))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
break
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
|
@ -654,7 +653,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
|
|||
params.Add("refresh_token", c.auth.IdentityToken)
|
||||
params.Add("client_id", "containers/image")
|
||||
|
||||
authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
|
||||
authReq.Body = io.NopCloser(bytes.NewBufferString(params.Encode()))
|
||||
authReq.Header.Add("User-Agent", c.userAgent)
|
||||
authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
|
||||
logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
|
@ -592,7 +591,7 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(url.Path, signature, 0644)
|
||||
err = os.WriteFile(url.Path, signature, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
|
|
@ -308,7 +307,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
|
|||
break
|
||||
}
|
||||
toSkip := c.Offset - currentOffset
|
||||
if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
|
||||
if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
|
||||
errs <- err
|
||||
break
|
||||
}
|
||||
|
|
@ -316,7 +315,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
|
|||
}
|
||||
s := signalCloseReader{
|
||||
closed: make(chan interface{}),
|
||||
stream: ioutil.NopCloser(io.LimitReader(body, int64(c.Length))),
|
||||
stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
|
||||
consumeStream: true,
|
||||
}
|
||||
streams <- s
|
||||
|
|
@ -515,7 +514,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
|
|||
switch url.Scheme {
|
||||
case "file":
|
||||
logrus.Debugf("Reading %s", url.Path)
|
||||
sig, err := ioutil.ReadFile(url.Path)
|
||||
sig, err := os.ReadFile(url.Path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, true, nil
|
||||
|
|
@ -765,7 +764,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) {
|
|||
func (s signalCloseReader) Close() error {
|
||||
defer close(s.closed)
|
||||
if s.consumeStream {
|
||||
if _, err := io.Copy(ioutil.Discard, s.stream); err != nil {
|
||||
if _, err := io.Copy(io.Discard, s.stream); err != nil {
|
||||
s.stream.Close()
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import (
|
|||
"archive/tar"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
|
|
@ -53,7 +52,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
|
|||
// The caller should call .Close() on the returned archive when done.
|
||||
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
|
||||
// Save inputStream to a temporary file
|
||||
tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
|
||||
tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "creating temporary file")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
|
|
@ -170,7 +169,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
|
|||
|
||||
uncompressedSize := h.Size
|
||||
if isCompressed {
|
||||
uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
|
||||
uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "reading %s to find its size", layerPath)
|
||||
}
|
||||
|
|
@ -263,7 +262,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
|
|||
}
|
||||
|
||||
if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
|
||||
return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
|
||||
return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
|
||||
}
|
||||
|
||||
if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@ package docker
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
|
|
@ -146,7 +145,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
|
|||
continue
|
||||
}
|
||||
configPath := filepath.Join(dirPath, configName)
|
||||
configBytes, err := ioutil.ReadFile(configPath)
|
||||
configBytes, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@ package iolimits
|
|||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
|
@ -47,7 +46,7 @@ const (
|
|||
func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
|
||||
limitedReader := io.LimitReader(reader, int64(limit+1))
|
||||
|
||||
res, err := ioutil.ReadAll(limitedReader)
|
||||
res, err := io.ReadAll(limitedReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ package streamdigest
|
|||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containers/image/v5/internal/putblobdigest"
|
||||
|
|
@ -16,7 +15,7 @@ import (
|
|||
// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
|
||||
// If an error occurs, inputInfo is not modified.
|
||||
func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
|
||||
diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
|
||||
diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ package archive
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
|
|
@ -161,7 +160,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
|
|||
// createOCIRef creates the oci reference of the image
|
||||
// If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
|
||||
func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
|
||||
dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
|
||||
dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
|
||||
if err != nil {
|
||||
return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
|
@ -124,7 +123,7 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool {
|
|||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
|
||||
blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
|
|
@ -238,7 +237,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc
|
|||
if err := ensureParentDirectoryExists(blobPath); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
|
||||
if err := os.WriteFile(blobPath, m, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -309,14 +308,14 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]
|
|||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
|
||||
if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
|
||||
if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
indexJSON, err := json.Marshal(d.index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
|
||||
return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
|
||||
}
|
||||
|
||||
func ensureDirectoryExists(path string) error {
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ package layout
|
|||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
|
@ -93,7 +92,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
|
|||
return nil, "", err
|
||||
}
|
||||
|
||||
m, err := ioutil.ReadFile(manifestPath)
|
||||
m, err := os.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@ import (
|
|||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
|
@ -625,7 +624,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
|
|||
// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
|
||||
// LoadFromFile takes a filename and deserializes the contents into Config object
|
||||
func loadFromFile(filename string) (*clientcmdConfig, error) {
|
||||
kubeconfigBytes, err := ioutil.ReadFile(filename)
|
||||
kubeconfigBytes, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -1013,7 +1012,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
|
|||
return data, nil
|
||||
}
|
||||
if len(file) > 0 {
|
||||
fileData, err := ioutil.ReadFile(file)
|
||||
fileData, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,7 +10,6 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
|
@ -148,7 +147,7 @@ func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
|
|||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
|
||||
tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
|
|
@@ -180,20 +179,24 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 }
 
 func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
-	entries, err := ioutil.ReadDir(dir)
+	entries, err := os.ReadDir(dir)
 	if err != nil {
 		return err
 	}
 
-	for _, info := range entries {
-		fullpath := filepath.Join(dir, info.Name())
-		if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+	for _, entry := range entries {
+		fullpath := filepath.Join(dir, entry.Name())
+		if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
 			if err := os.Remove(fullpath); err != nil {
 				return err
 			}
 			continue
 		}
 
+		info, err := entry.Info()
+		if err != nil {
+			return err
+		}
 		if selinuxHnd != nil {
 			relPath, err := filepath.Rel(root, fullpath)
 			if err != nil {
|
||||
|
|
@ -223,7 +226,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
|
|||
}
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
if entry.IsDir() {
|
||||
if usermode {
|
||||
if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
|
||||
return err
|
||||
|
|
@ -233,7 +236,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if usermode && (info.Mode().IsRegular()) {
|
||||
} else if usermode && (entry.Type().IsRegular()) {
|
||||
if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
|
||||
return err
|
||||
}
|
||||
|
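The fixFiles rewrite above follows the standard os.ReadDir migration: entry.Type() is enough for the cheap mode-bit checks, and entry.Info() is only called when a full FileInfo (permission bits) is actually needed. A small sketch of that pattern, assuming a hypothetical directory listing rather than the vendored function:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func listSpecialAndDirs(dir string) error {
	entries, err := os.ReadDir(dir) // returns fs.DirEntry, cheaper than ioutil.ReadDir
	if err != nil {
		return err
	}
	for _, entry := range entries {
		full := filepath.Join(dir, entry.Name())
		// Type() is available without an extra stat.
		if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
			fmt.Println("special file:", full)
			continue
		}
		// Info() performs the lstat only when mode bits are really needed.
		info, err := entry.Info()
		if err != nil {
			return err
		}
		if entry.IsDir() {
			fmt.Printf("dir %s mode %v\n", full, info.Mode().Perm())
		}
	}
	return nil
}

func main() {
	if err := listSpecialAndDirs(os.TempDir()); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}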
|
@ -405,7 +408,7 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [
|
|||
}
|
||||
d.digest = digest
|
||||
|
||||
return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
|
||||
return os.WriteFile(manifestPath, manifestBlob, 0644)
|
||||
}
|
||||
|
||||
// PutSignatures writes signatures to the destination.
|
||||
|
|
@ -423,7 +426,7 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
|
|||
|
||||
for i, sig := range signatures {
|
||||
signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
|
||||
if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
|
||||
if err := os.WriteFile(signaturePath, sig, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@ import (
|
|||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
|
@ -369,7 +368,7 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
|
|||
}
|
||||
defer sigReader.Close()
|
||||
|
||||
sig, err := ioutil.ReadAll(sigReader)
|
||||
sig, err := os.ReadAll(sigReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
|
@ -196,7 +195,7 @@ func (s *blobCacheSource) Close() error {
|
|||
func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
|
||||
if instanceDigest != nil {
|
||||
filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false))
|
||||
manifestBytes, err := ioutil.ReadFile(filename)
|
||||
manifestBytes, err := os.ReadFile(filename)
|
||||
if err == nil {
|
||||
s.cacheHits++
|
||||
return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil
|
||||
|
|
@ -280,10 +279,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
|
|||
switch s.reference.compress {
|
||||
case types.Compress:
|
||||
alternate = blobFile + compressedNote
|
||||
replaceDigest, err = ioutil.ReadFile(alternate)
|
||||
replaceDigest, err = os.ReadFile(alternate)
|
||||
case types.Decompress:
|
||||
alternate = blobFile + decompressedNote
|
||||
replaceDigest, err = ioutil.ReadFile(alternate)
|
||||
replaceDigest, err = os.ReadFile(alternate)
|
||||
}
|
||||
if err == nil && digest.Digest(replaceDigest).Validate() == nil {
|
||||
alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false))
|
||||
|
|
@ -373,7 +372,7 @@ func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os
|
|||
_, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
|
||||
} else {
|
||||
// Drain the pipe to keep from stalling the PutBlob() thread.
|
||||
if _, err := io.Copy(ioutil.Discard, decompressReader); err != nil {
|
||||
if _, err := io.Copy(io.Discard, decompressReader); err != nil {
|
||||
logrus.Debugf("error draining the pipe: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -423,7 +422,7 @@ func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, in
|
|||
compression := archive.Uncompressed
|
||||
if inputInfo.Digest != "" {
|
||||
filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
|
||||
tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
|
||||
tempfile, err = os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
|
||||
if err == nil {
|
||||
stream = io.TeeReader(stream, tempfile)
|
||||
defer func() {
|
||||
|
|
@ -457,7 +456,7 @@ func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, in
|
|||
if compression == archive.Gzip {
|
||||
// The stream is compressed, so create a file which we'll
|
||||
// use to store a decompressed copy.
|
||||
decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
|
||||
decompressedTemp, err2 := os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
|
||||
if err2 != nil {
|
||||
logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2)
|
||||
decompressedTemp.Close()
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@ import (
|
|||
"compress/bzip2"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/containers/image/v5/pkg/compression/internal"
|
||||
"github.com/containers/image/v5/pkg/compression/types"
|
||||
|
|
@ -65,7 +64,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
|
|||
|
||||
// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
|
||||
func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
|
||||
return ioutil.NopCloser(bzip2.NewReader(r)), nil
|
||||
return io.NopCloser(bzip2.NewReader(r)), nil
|
||||
}
|
||||
|
||||
// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
|
||||
|
|
@ -74,7 +73,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ioutil.NopCloser(r), nil
|
||||
return io.NopCloser(r), nil
|
||||
}
|
||||
|
||||
// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
|
||||
|
|
@ -161,7 +160,7 @@ func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
|
|||
return nil, false, errors.Wrapf(err, "initializing decompression")
|
||||
}
|
||||
} else {
|
||||
res = ioutil.NopCloser(stream)
|
||||
res = io.NopCloser(stream)
|
||||
}
|
||||
return res, decompressor != nil, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import (
|
|||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
|
@ -544,7 +543,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e
|
|||
func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
|
||||
var auths dockerConfigFile
|
||||
|
||||
raw, err := ioutil.ReadFile(path)
|
||||
raw, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
auths.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
|
|
|
|||
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go (9 changes; generated, vendored)
|
|
@ -2,6 +2,7 @@ package sysregistriesv2
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
|
|
@@ -643,17 +644,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
 		dirPaths = append(dirPaths, wrapper.userConfigDirPath)
 	}
 	for _, dirPath := range dirPaths {
-		err := filepath.Walk(dirPath,
+		err := filepath.WalkDir(dirPath,
 			// WalkFunc to read additional configs
-			func(path string, info os.FileInfo, err error) error {
+			func(path string, d fs.DirEntry, err error) error {
 				switch {
 				case err != nil:
 					// return error (could be a permission problem)
 					return err
-				case info == nil:
+				case d == nil:
 					// this should only happen when err != nil but let's be sure
 					return nil
-				case info.IsDir():
+				case d.IsDir():
 					if path != dirPath {
 						// make sure to not recurse into sub-directories
 						return filepath.SkipDir
|
||||
|
|
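The dropInConfigs hunk swaps filepath.Walk for filepath.WalkDir, which hands the callback an fs.DirEntry and avoids a stat per file, while filepath.SkipDir keeps the scan non-recursive. A minimal sketch of that shape, with a hypothetical .conf collector rather than the vendored function:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)

// collectConfs returns the *.conf files directly inside dirPath, without recursing.
func collectConfs(dirPath string) ([]string, error) {
	var configs []string
	err := filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error {
		switch {
		case err != nil:
			return err // e.g. a permission problem
		case d == nil:
			return nil // should only happen when err != nil
		case d.IsDir():
			if path != dirPath {
				return filepath.SkipDir // do not descend into sub-directories
			}
			return nil
		}
		if strings.HasSuffix(path, ".conf") {
			configs = append(configs, path)
		}
		return nil
	})
	return configs, err
}

func main() {
	confs, err := collectConfs("/etc/containers/registries.conf.d")
	fmt.Println(confs, err)
}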
|
|||
|
|
@ -2,7 +2,6 @@ package tlsclientconfig
|
|||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
|
|
@ -19,7 +18,7 @@ import (
|
|||
// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
|
||||
func SetupCertificates(dir string, tlsc *tls.Config) error {
|
||||
logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
|
||||
fs, err := ioutil.ReadDir(dir)
|
||||
fs, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
|
|
@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
|
|||
fullPath := filepath.Join(dir, f.Name())
|
||||
if strings.HasSuffix(f.Name(), ".crt") {
|
||||
logrus.Debugf(" crt: %s", fullPath)
|
||||
data, err := ioutil.ReadFile(fullPath)
|
||||
data, err := os.ReadFile(fullPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// Dangling symbolic link?
|
||||
|
|
@ -81,7 +80,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func hasFile(files []os.FileInfo, name string) bool {
|
||||
func hasFile(files []os.DirEntry, name string) bool {
|
||||
for _, f := range files {
|
||||
if f.Name() == name {
|
||||
return true
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
|
@ -103,7 +102,7 @@ func writeInjectedScript(extractedRootPath string, injectedScript []byte) error
|
|||
if err := os.MkdirAll(parentDirPath, 0755); err != nil {
|
||||
return fmt.Errorf("creating %s: %w", parentDirPath, err)
|
||||
}
|
||||
if err := ioutil.WriteFile(filePath, injectedScript, 0755); err != nil {
|
||||
if err := os.WriteFile(filePath, injectedScript, 0755); err != nil {
|
||||
return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err)
|
||||
}
|
||||
return nil
|
||||
|
|
@ -121,7 +120,7 @@ func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, i
|
|||
conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./",
|
||||
extractedRootPath, squashFSPath, extractedRootPath, tarPath)
|
||||
script := "#!/bin/sh\n" + conversionCommand + "\n"
|
||||
if err := ioutil.WriteFile(scriptPath, []byte(script), 0755); err != nil {
|
||||
if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(scriptPath)
|
||||
|
|
@ -149,7 +148,7 @@ func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, i
|
|||
// at start, and is exclusively used by the current process (i.e. it is safe
|
||||
// to use hard-coded relative paths within it).
|
||||
func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) {
|
||||
// We could allocate unique names for all of these using ioutil.Temp*, but tempDir is exclusive,
|
||||
// We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive,
|
||||
// so we can just hard-code a set of unique values here.
|
||||
// We create and/or manage cleanup of these two paths.
|
||||
squashFSPath := filepath.Join(tempDir, "rootfs.squashfs")
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containers/image/v5/internal/tmpdir"
|
||||
|
|
@ -65,7 +64,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
|
|||
_ = sifImg.UnloadContainer()
|
||||
}()
|
||||
|
||||
workDir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
|
||||
workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating temp directory: %w", err)
|
||||
}
|
||||
|
|
@ -170,7 +169,7 @@ func (s *sifImageSource) HasThreadSafeGetBlob() bool {
|
|||
func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
switch info.Digest {
|
||||
case s.configDigest:
|
||||
return ioutil.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
|
||||
return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
|
||||
case s.layerDigest:
|
||||
reader, err := os.Open(s.layerFile)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ import (
|
|||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
// This code is used only to parse the data in an explicitly-untrusted
|
||||
|
|
@ -82,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents
|
|||
if !md.IsSigned {
|
||||
return nil, "", errors.New("The input is not a signature")
|
||||
}
|
||||
content, err := ioutil.ReadAll(md.UnverifiedBody)
|
||||
content, err := io.ReadAll(md.UnverifiedBody)
|
||||
if err != nil {
|
||||
// Coverage: An error during reading the body can happen only if
|
||||
// 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@ import (
|
|||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/proglottis/gpgme"
|
||||
|
|
@ -37,7 +36,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith
|
|||
// of these keys.
|
||||
// The caller must call .Close() on the returned SigningMechanism.
|
||||
func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
|
||||
dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
|
||||
dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ import (
|
|||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
|
@ -44,7 +44,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith
|
|||
}
|
||||
}
|
||||
|
||||
pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
|
||||
pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg"))
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
|
|
@ -130,7 +130,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
|
|||
if !md.IsSigned {
|
||||
return nil, "", errors.New("not signed")
|
||||
}
|
||||
content, err := ioutil.ReadAll(md.UnverifiedBody)
|
||||
content, err := io.ReadAll(md.UnverifiedBody)
|
||||
if err != nil {
|
||||
// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
|
||||
// (and possibly also signed, but it _must_ be encrypted) and the signing
|
||||
|
|
|
|||
|
|
@ -16,7 +16,6 @@ package signature
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
|
@ -80,7 +79,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
|
|||
|
||||
// NewPolicyFromFile returns a policy configured in the specified file.
|
||||
func NewPolicyFromFile(fileName string) (*Policy, error) {
|
||||
contents, err := ioutil.ReadFile(fileName)
|
||||
contents, err := os.ReadFile(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ package signature
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
|
|
@ -33,7 +33,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types
|
|||
if pr.KeyData != nil {
|
||||
data = pr.KeyData
|
||||
} else {
|
||||
d, err := ioutil.ReadFile(pr.KeyPath)
|
||||
d, err := os.ReadFile(pr.KeyPath)
|
||||
if err != nil {
|
||||
return sarRejected, nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,7 +10,6 @@ import (
|
|||
stderrors "errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
|
@ -155,7 +154,7 @@ func (s *storageImageSource) HasThreadSafeGetBlob() bool {
|
|||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
|
||||
if info.Digest == image.GzippedEmptyLayerDigest {
|
||||
return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
|
||||
return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
|
||||
}
|
||||
|
||||
// NOTE: the blob is first written to a temporary file and subsequently
|
||||
|
|
@ -167,7 +166,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
|
|||
}
|
||||
defer rc.Close()
|
||||
|
||||
tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
|
||||
tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
|
@ -210,7 +209,7 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC
|
|||
}
|
||||
r := bytes.NewReader(b)
|
||||
logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
|
||||
return ioutil.NopCloser(r), int64(r.Len()), "", nil
|
||||
return io.NopCloser(r), int64(r.Len()), "", nil
|
||||
}
|
||||
// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
|
||||
// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
|
||||
|
|
@ -395,7 +394,7 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *
|
|||
// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
|
||||
// it's time to Commit() the image
|
||||
func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
|
||||
directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
|
||||
directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "creating a temporary directory")
|
||||
}
|
||||
|
|
@ -791,7 +790,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
|
|||
}
|
||||
// Assume it's a file, since we're only calling this from a place that expects to read files.
|
||||
if filename, ok := s.filenames[info.Digest]; ok {
|
||||
contents, err2 := ioutil.ReadFile(filename)
|
||||
contents, err2 := os.ReadFile(filename)
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err2, `reading blob from file %q`, filename)
|
||||
}
|
||||
|
|
@ -1136,7 +1135,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
|
|||
delete(dataBlobs, layerBlob.Digest)
|
||||
}
|
||||
for blob := range dataBlobs {
|
||||
v, err := ioutil.ReadFile(s.filenames[blob])
|
||||
v, err := os.ReadFile(s.filenames[blob])
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "copying non-layer blob %q to image", blob)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
|
@ -87,7 +86,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
|
|||
uncompressed = nil
|
||||
}
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
n, err := io.Copy(ioutil.Discard, reader)
|
||||
n, err := io.Copy(io.Discard, reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading %q: %v", filename, err)
|
||||
}
|
||||
|
|
@ -217,14 +216,14 @@ func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
|
|||
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
// We should only be asked about things in the manifest. Maybe the configuration blob.
|
||||
if blobinfo.Digest == is.configID {
|
||||
return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
|
||||
return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
|
||||
}
|
||||
// Maybe one of the layer blobs.
|
||||
for i := range is.blobIDs {
|
||||
if blobinfo.Digest == is.blobIDs[i] {
|
||||
// We want to read that layer: open the file or memory block and hand it back.
|
||||
if is.filenames[i] == "-" {
|
||||
return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
|
||||
return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
|
||||
}
|
||||
reader, err := os.Open(is.filenames[i])
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ package tarball
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
|
|
@ -36,7 +36,7 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc
|
|||
filenames := strings.Split(reference, separator)
|
||||
for _, filename := range filenames {
|
||||
if filename == "-" {
|
||||
stdin, err = ioutil.ReadAll(os.Stdin)
|
||||
stdin, err = io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error buffering stdin: %v", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package overlay
|
||||
|
|
@ -11,6 +12,7 @@ import (
|
|||
"syscall"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
"github.com/containers/storage/pkg/mount"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
|
|
@ -218,3 +220,55 @@ func doesVolatile(d string) (bool, error) {
|
|||
}()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of
|
||||
// a idmapped lower layer.
|
||||
func supportsIdmappedLowerLayers(home string) (bool, error) {
|
||||
layerDir, err := ioutil.TempDir(home, "compat")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer func() {
|
||||
_ = os.RemoveAll(layerDir)
|
||||
}()
|
||||
|
||||
mergedDir := filepath.Join(layerDir, "merged")
|
||||
lowerDir := filepath.Join(layerDir, "lower")
|
||||
lowerMappedDir := filepath.Join(layerDir, "lower-mapped")
|
||||
upperDir := filepath.Join(layerDir, "upper")
|
||||
workDir := filepath.Join(layerDir, "work")
|
||||
|
||||
_ = idtools.MkdirAs(mergedDir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAs(lowerDir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAs(upperDir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAs(workDir, 0700, 0, 0)
|
||||
|
||||
idmap := []idtools.IDMap{
|
||||
{
|
||||
ContainerID: 0,
|
||||
HostID: 0,
|
||||
Size: 1,
|
||||
},
|
||||
}
|
||||
pid, cleanupFunc, err := createUsernsProcess(idmap, idmap)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer cleanupFunc()
|
||||
|
||||
if err := createIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
|
||||
return false, errors.Wrapf(err, "create mapped mount")
|
||||
}
|
||||
defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH)
|
||||
|
||||
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir)
|
||||
flags := uintptr(0)
|
||||
if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer func() {
|
||||
_ = unix.Unmount(mergedDir, unix.MNT_DETACH)
|
||||
}()
|
||||
return true, nil
|
||||
}
|
||||
|
|
|
|||
vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go (new file, 160 lines; generated, vendored)
|
|
@ -0,0 +1,160 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type attr struct {
|
||||
attrSet uint64
|
||||
attrClr uint64
|
||||
propagation uint64
|
||||
userNs uint64
|
||||
}
|
||||
|
||||
const (
|
||||
// _MOUNT_ATTR_IDMAP - Idmap mount to @userns_fd in struct mount_attr
|
||||
_MOUNT_ATTR_IDMAP = 0x00100000 //nolint:golint
|
||||
|
||||
// _OPEN_TREE_CLONE - Clone the source path mount
|
||||
_OPEN_TREE_CLONE = 0x00000001 //nolint:golint
|
||||
|
||||
// _MOVE_MOUNT_F_EMPTY_PATH - Move the path referenced by the fd
|
||||
_MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 //nolint:golint
|
||||
)
|
||||
|
||||
// openTree is a wrapper for the open_tree syscall
|
||||
func openTree(path string, flags int) (fd int, err error) {
|
||||
var _p0 *byte
|
||||
|
||||
if _p0, err = syscall.BytePtrFromString(path); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
r, _, e1 := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)),
|
||||
uintptr(flags), 0, 0, 0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return int(r), nil
|
||||
}
|
||||
|
||||
// moveMount is a wrapper for the the move_mount syscall.
|
||||
func moveMount(fdTree int, target string) (err error) {
|
||||
var _p0, _p1 *byte
|
||||
|
||||
empty := ""
|
||||
|
||||
if _p0, err = syscall.BytePtrFromString(target); err != nil {
|
||||
return err
|
||||
}
|
||||
if _p1, err = syscall.BytePtrFromString(empty); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
flags := _MOVE_MOUNT_F_EMPTY_PATH
|
||||
|
||||
_, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT),
|
||||
uintptr(fdTree), uintptr(unsafe.Pointer(_p1)),
|
||||
0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// mountSetAttr is a wrapper for the mount_setattr syscall
|
||||
func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) {
|
||||
var _p0 *byte
|
||||
|
||||
if _p0, err = syscall.BytePtrFromString(path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)),
|
||||
uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// createIDMappedMount creates a IDMapped bind mount from SOURCE to TARGET using the user namespace
|
||||
// for the PID process.
|
||||
func createIDMappedMount(source, target string, pid int) error {
|
||||
path := fmt.Sprintf("/proc/%d/ns/user", pid)
|
||||
userNsFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to get user ns file descriptor for %q", path)
|
||||
}
|
||||
|
||||
var attr attr
|
||||
attr.attrSet = _MOUNT_ATTR_IDMAP
|
||||
attr.attrClr = 0
|
||||
attr.propagation = 0
|
||||
attr.userNs = uint64(userNsFile.Fd())
|
||||
|
||||
defer userNsFile.Close()
|
||||
|
||||
targetDirFd, err := openTree(source, _OPEN_TREE_CLONE|unix.AT_RECURSIVE)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer unix.Close(targetDirFd)
|
||||
|
||||
if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
|
||||
&attr, uint(unsafe.Sizeof(attr))); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
return moveMount(targetDirFd, target)
|
||||
}
|
||||
|
||||
// createUsernsProcess forks the current process and creates a user namespace using the specified
|
||||
// mappings. It returns the pid of the new process.
|
||||
func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) {
|
||||
pid, _, err := syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0)
|
||||
if err != 0 {
|
||||
return -1, nil, err
|
||||
}
|
||||
if pid == 0 {
|
||||
_ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
|
||||
// just wait for the SIGKILL
|
||||
for {
|
||||
syscall.Syscall6(uintptr(unix.SYS_PAUSE), 0, 0, 0, 0, 0, 0)
|
||||
}
|
||||
}
|
||||
cleanupFunc := func() {
|
||||
unix.Kill(int(pid), unix.SIGKILL)
|
||||
_, _ = unix.Wait4(int(pid), nil, 0, nil)
|
||||
}
|
||||
writeMappings := func(fname string, idmap []idtools.IDMap) error {
|
||||
mappings := ""
|
||||
for _, m := range idmap {
|
||||
mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
|
||||
}
|
||||
return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
|
||||
}
|
||||
if err := writeMappings("uid_map", uidMaps); err != nil {
|
||||
cleanupFunc()
|
||||
return -1, nil, err
|
||||
}
|
||||
if err := writeMappings("gid_map", gidMaps); err != nil {
|
||||
cleanupFunc()
|
||||
return -1, nil, err
|
||||
}
|
||||
|
||||
return int(pid), cleanupFunc, nil
|
||||
}
|
||||
|
|
@ -39,7 +39,6 @@ import (
|
|||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/vbatts/tar-split/tar/storage"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
|
@ -121,6 +120,8 @@ type Driver struct {
|
|||
supportsVolatile *bool
|
||||
usingMetacopy bool
|
||||
locker *locker.Locker
|
||||
|
||||
supportsIDMappedMounts *bool
|
||||
}
|
||||
|
||||
type additionalLayerStore struct {
|
||||
|
|
@ -205,6 +206,26 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
|
|||
return usingVolatile, nil
|
||||
}
|
||||
|
||||
// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of a
|
||||
// idmapped lower layer.
|
||||
func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) {
|
||||
feature := "idmapped-lower-dir"
|
||||
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
|
||||
if err == nil {
|
||||
if overlayCacheResult {
|
||||
logrus.Debugf("Cached value indicated that overlay is supported")
|
||||
return true, nil
|
||||
}
|
||||
logrus.Debugf("Cached value indicated that overlay is not supported")
|
||||
return false, errors.New(overlayCacheText)
|
||||
}
|
||||
supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home)
|
||||
if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil {
|
||||
return false, errors.Wrap(err2, "recording overlay idmapped mounts support status")
|
||||
}
|
||||
return supportsIDMappedMounts, err
|
||||
}
|
||||
|
||||
func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) {
|
||||
var supportsDType bool
|
||||
|
||||
|
|
@ -1485,6 +1506,51 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
}
|
||||
}
|
||||
|
||||
if d.supportsIDmappedMounts() && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 {
|
||||
var newAbsDir []string
|
||||
mappedRoot := filepath.Join(d.home, id, "mapped")
|
||||
if err := os.MkdirAll(mappedRoot, 0700); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
pid, cleanupFunc, err := createUsernsProcess(options.UidMaps, options.GidMaps)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer cleanupFunc()
|
||||
|
||||
idMappedMounts := make(map[string]string)
|
||||
|
||||
// rewrite the lower dirs to their idmapped mount.
|
||||
c := 0
|
||||
for _, absLower := range absLowers {
|
||||
mappedMountSrc := getMappedMountRoot(absLower)
|
||||
|
||||
root, found := idMappedMounts[mappedMountSrc]
|
||||
if !found {
|
||||
root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
|
||||
c++
|
||||
if err := createIDMappedMount(mappedMountSrc, root, int(pid)); err != nil {
|
||||
return "", errors.Wrapf(err, "create mapped mount for %q on %q", mappedMountSrc, root)
|
||||
}
|
||||
idMappedMounts[mappedMountSrc] = root
|
||||
|
||||
// overlay takes a reference on the mount, so it is safe to unmount
|
||||
// the mapped idmounts as soon as the final overlay file system is mounted.
|
||||
defer unix.Unmount(root, unix.MNT_DETACH)
|
||||
}
|
||||
|
||||
// relative path to the layer through the id mapped mount
|
||||
rel, err := filepath.Rel(mappedMountSrc, absLower)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
newAbsDir = append(newAbsDir, filepath.Join(root, rel))
|
||||
}
|
||||
absLowers = newAbsDir
|
||||
}
|
||||
|
||||
var opts string
|
||||
if readWrite {
|
||||
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
|
||||
|
|
@ -1587,6 +1653,18 @@ func (d *Driver) Put(id string) error {
|
|||
|
||||
unmounted := false
|
||||
|
||||
mappedRoot := filepath.Join(d.home, id, "mapped")
|
||||
// It should not happen, but cleanup any mapped mount if it was leaked.
|
||||
if _, err := os.Stat(mappedRoot); err == nil {
|
||||
mounts, err := ioutil.ReadDir(mappedRoot)
|
||||
if err == nil {
|
||||
// Go through all of the mapped mounts.
|
||||
for _, m := range mounts {
|
||||
_ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if d.options.mountProgram != "" {
|
||||
// Attempt to unmount the FUSE mount using either fusermount or fusermount3.
|
||||
// If they fail, fallback to unix.Unmount
|
||||
|
|
@@ -1664,11 +1742,24 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
 	return whiteoutFormat
 }
 
-type fileGetNilCloser struct {
-	storage.FileGetter
+type overlayFileGetter struct {
+	diffDirs []string
 }
 
-func (f fileGetNilCloser) Close() error {
+func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
+	for _, d := range g.diffDirs {
+		f, err := os.Open(filepath.Join(d, path))
+		if err == nil {
+			return f, nil
+		}
+	}
+	if len(g.diffDirs) > 0 {
+		return os.Open(filepath.Join(g.diffDirs[0], path))
+	}
+	return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist)
+}
+
+func (g *overlayFileGetter) Close() error {
 	return nil
 }
|
||||
|
||||
|
|
@@ -1677,13 +1768,18 @@ func (d *Driver) getStagingDir() string {
 }
 
 // DiffGetter returns a FileGetCloser that can read files from the directory that
-// contains files for the layer differences. Used for direct access for tar-split.
+// contains files for the layer differences, either for this layer, or one of our
+// lowers if we're just a template directory. Used for direct access for tar-split.
 func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
 	p, err := d.getDiffPath(id)
 	if err != nil {
 		return nil, err
 	}
-	return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+	paths, err := d.getLowerDiffPaths(id)
+	if err != nil {
+		return nil, err
+	}
+	return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil
 }
|
||||
|
||||
// CleanupStagingDirectory cleanups the staging directory.
|
||||
|
|
@@ -1958,12 +2054,31 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
 	return nil
 }
 
+// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with
+// overlay lower layers.
+func (d *Driver) supportsIDmappedMounts() bool {
+	if d.supportsIDMappedMounts != nil {
+		return *d.supportsIDMappedMounts
+	}
+
+	supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome)
+	d.supportsIDMappedMounts = &supportsIDMappedMounts
+	if err == nil {
+		return supportsIDMappedMounts
+	}
+	logrus.Debugf("Check for idmapped mounts support %v", err)
+	return false
+}
+
 // SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
 func (d *Driver) SupportsShifting() bool {
 	if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" {
 		return true
 	}
-	return d.options.mountProgram != ""
+	if d.options.mountProgram != "" {
+		return true
+	}
+	return d.supportsIDmappedMounts()
 }
 
 // dumbJoin is more or less a dumber version of filepath.Join, but one which
|
||||
|
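supportsIDmappedMounts above memoizes an expensive kernel probe behind a *bool field so the check runs at most once per driver instance. A stripped-down sketch of that caching shape, with a hypothetical probe and assuming single-threaded access as in the driver:

package main

import "fmt"

type driver struct {
	// nil means "not probed yet"; the pointer caches the probe result.
	supportsFeature *bool
}

// probeKernelFeature stands in for an expensive, idempotent capability check.
func probeKernelFeature() (bool, error) {
	return true, nil
}

func (d *driver) featureSupported() bool {
	if d.supportsFeature != nil {
		return *d.supportsFeature
	}
	ok, err := probeKernelFeature()
	d.supportsFeature = &ok
	if err != nil {
		// Probe failures are treated as "unsupported", and that result is cached too.
		return false
	}
	return ok
}

func main() {
	d := &driver{}
	fmt.Println(d.featureSupported()) // probes once
	fmt.Println(d.featureSupported()) // served from the cached pointer
}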
|
@@ -2132,3 +2247,15 @@ func redirectDiffIfAdditionalLayer(diffPath string) (string, error) {
 	}
 	return diffPath, nil
 }
+
+// getMappedMountRoot is a heuristic that calculates the parent directory where
+// the idmapped mount should be applied.
+// It is useful to minimize the number of idmapped mounts and at the same time use
+// a common path as long as possible to reduce the length of the mount data argument.
+func getMappedMountRoot(path string) string {
+	dirName := filepath.Dir(path)
+	if filepath.Base(dirName) == linkDir {
+		return filepath.Dir(dirName)
+	}
+	return dirName
+}
|
||||
|
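getMappedMountRoot trims one or two path components so that sibling lower layers can share a single idmapped mount of their parent directory. A tiny worked illustration of what it returns; the overlay paths are hypothetical and linkDir is assumed to be the driver's "l" directory of shortened layer links:

package main

import (
	"fmt"
	"path/filepath"
)

const linkDir = "l" // assumed: the overlay driver's directory of shortened layer links

func getMappedMountRoot(path string) string {
	dirName := filepath.Dir(path)
	if filepath.Base(dirName) == linkDir {
		return filepath.Dir(dirName)
	}
	return dirName
}

func main() {
	// A lower referenced through the link directory maps to the driver home...
	fmt.Println(getMappedMountRoot("/var/lib/containers/storage/overlay/l/ABCDEF"))
	// -> /var/lib/containers/storage/overlay

	// ...while a direct diff path maps to its layer directory.
	fmt.Println(getMappedMountRoot("/var/lib/containers/storage/overlay/1a2b3c/diff"))
	// -> /var/lib/containers/storage/overlay/1a2b3c
}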
|
|
|||
|
|
@ -6,7 +6,7 @@ require (
|
|||
github.com/BurntSushi/toml v1.1.0
|
||||
github.com/Microsoft/go-winio v0.5.2
|
||||
github.com/Microsoft/hcsshim v0.9.2
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.11.3
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.11.4
|
||||
github.com/cyphar/filepath-securejoin v0.2.3
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/google/go-intervals v0.0.2
|
||||
|
|
@ -16,7 +16,7 @@ require (
|
|||
github.com/klauspost/pgzip v1.2.5
|
||||
github.com/mattn/go-shellwords v1.0.12
|
||||
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
|
||||
github.com/moby/sys/mountinfo v0.6.0
|
||||
github.com/moby/sys/mountinfo v0.6.1
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/runc v1.1.1
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
|
||||
|
|
|
|||
|
|
@ -176,8 +176,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
|
|||
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
|
||||
|
|
@ -467,8 +467,8 @@ github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQ
|
|||
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
|
||||
github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
|
||||
github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
|
||||
github.com/moby/sys/mountinfo v0.6.1 h1:+H/KnGEAGRpTrEAqNVQ2AM3SiwMgJUt/TXj+Z8cmCIc=
|
||||
github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
|
||||
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
|
|
|||
|
|
@ -683,7 +683,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
|
|||
r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
|
||||
}
|
||||
if layer.UncompressedDigest != "" {
|
||||
r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID)
|
||||
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
|
||||
}
|
||||
if err := r.Save(); err != nil {
|
||||
r.driver.Remove(id)
|
||||
|
|
@ -725,12 +725,32 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
|
|||
parent = parentLayer.ID
|
||||
}
|
||||
var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings
|
||||
var (
|
||||
templateMetadata string
|
||||
templateCompressedDigest digest.Digest
|
||||
templateCompressedSize int64
|
||||
templateUncompressedDigest digest.Digest
|
||||
templateUncompressedSize int64
|
||||
templateCompressionType archive.Compression
|
||||
templateUIDs, templateGIDs []uint32
|
||||
templateTSdata []byte
|
||||
)
|
||||
if moreOptions.TemplateLayer != "" {
|
||||
var tserr error
|
||||
templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
|
||||
if !ok {
|
||||
return nil, -1, ErrLayerUnknown
|
||||
}
|
||||
templateMetadata = templateLayer.Metadata
|
||||
templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
|
||||
templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
|
||||
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
|
||||
templateCompressionType = templateLayer.CompressionType
|
||||
templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
|
||||
templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID))
|
||||
if tserr != nil && !os.IsNotExist(tserr) {
|
||||
return nil, -1, tserr
|
||||
}
|
||||
} else {
|
||||
templateIDMappings = &idtools.IDMappings{}
|
||||
}
|
||||
|
|
@ -775,17 +795,43 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
|
|||
return nil, -1, err
|
||||
}
|
||||
}
|
||||
if len(templateTSdata) > 0 {
|
||||
if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
|
||||
// We don't have a record of this layer, but at least
|
||||
// try to clean it up underneath us.
|
||||
if err2 := r.driver.Remove(id); err2 != nil {
|
||||
logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2)
|
||||
}
|
||||
return nil, -1, err
|
||||
}
|
||||
if err = ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
|
||||
// We don't have a record of this layer, but at least
|
||||
// try to clean it up underneath us.
|
||||
if err2 := r.driver.Remove(id); err2 != nil {
|
||||
logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2)
|
||||
}
|
||||
return nil, -1, err
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
layer = &Layer{
|
||||
ID: id,
|
||||
Parent: parent,
|
||||
Names: names,
|
||||
MountLabel: mountLabel,
|
||||
Created: time.Now().UTC(),
|
||||
Flags: make(map[string]interface{}),
|
||||
UIDMap: copyIDMap(moreOptions.UIDMap),
|
||||
GIDMap: copyIDMap(moreOptions.GIDMap),
|
||||
BigDataNames: []string{},
|
||||
ID: id,
|
||||
Parent: parent,
|
||||
Names: names,
|
||||
MountLabel: mountLabel,
|
||||
Metadata: templateMetadata,
|
||||
Created: time.Now().UTC(),
|
||||
CompressedDigest: templateCompressedDigest,
|
||||
CompressedSize: templateCompressedSize,
|
||||
UncompressedDigest: templateUncompressedDigest,
|
||||
UncompressedSize: templateUncompressedSize,
|
||||
CompressionType: templateCompressionType,
|
||||
UIDs: templateUIDs,
|
||||
GIDs: templateGIDs,
|
||||
Flags: make(map[string]interface{}),
|
||||
UIDMap: copyIDMap(moreOptions.UIDMap),
|
||||
GIDMap: copyIDMap(moreOptions.GIDMap),
|
||||
BigDataNames: []string{},
|
||||
}
|
||||
r.layers = append(r.layers, layer)
|
||||
r.idindex.Add(id)
|
||||
|
|
@ -820,6 +866,14 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
|
|||
return nil, -1, err
|
||||
}
|
||||
delete(layer.Flags, incompleteFlag)
|
||||
} else {
|
||||
// applyDiffWithOptions in the `diff != nil` case handles this bit for us
|
||||
if layer.CompressedDigest != "" {
|
||||
r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
|
||||
}
|
||||
if layer.UncompressedDigest != "" {
|
||||
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
|
||||
}
|
||||
}
|
||||
err = r.Save()
|
||||
if err != nil {
|
||||
|
|
@ -872,7 +926,6 @@ func (r *layerStore) Mounted(id string) (int, error) {
|
|||
}
|
||||
|
||||
func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
|
||||
|
||||
// check whether options include ro option
|
||||
hasReadOnlyOpt := func(opts []string) bool {
|
||||
for _, item := range opts {
|
||||
|
|
|
|||
|
|
@ -2452,6 +2452,10 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
|
|||
}
|
||||
layer := image.TopLayer
|
||||
layersToRemoveMap := make(map[string]struct{})
|
||||
layersToRemove = append(layersToRemove, image.MappedTopLayers...)
|
||||
for _, mappedTopLayer := range image.MappedTopLayers {
|
||||
layersToRemoveMap[mappedTopLayer] = struct{}{}
|
||||
}
|
||||
for layer != "" {
|
||||
if rcstore.Exists(layer) {
|
||||
break
|
||||
|
|
@ -2483,12 +2487,6 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
|
|||
if hasChildrenNotBeingRemoved() {
|
||||
break
|
||||
}
|
||||
if layer == image.TopLayer {
|
||||
layersToRemove = append(layersToRemove, image.MappedTopLayers...)
|
||||
for _, mappedTopLayer := range image.MappedTopLayers {
|
||||
layersToRemoveMap[mappedTopLayer] = struct{}{}
|
||||
}
|
||||
}
|
||||
layersToRemove = append(layersToRemove, layer)
|
||||
layersToRemoveMap[layer] = struct{}{}
|
||||
layer = parent
|
||||
|
|
|
|||
|
|
@ -42,13 +42,14 @@ func validateMountOptions(mountOptions []string) error {
|
|||
}
|
||||
|
||||
func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) {
|
||||
result := make([]string, 0)
|
||||
var result []string
|
||||
switch op {
|
||||
case setNames:
|
||||
// ignore all old names and just return new names
|
||||
return dedupeNames(opParameters), nil
|
||||
result = opParameters
|
||||
case removeNames:
|
||||
// remove given names from old names
|
||||
result = make([]string, 0, len(oldNames))
|
||||
for _, name := range oldNames {
|
||||
// only keep names in final result which do not intersect with input names
|
||||
// basically `result = oldNames - opParameters`
|
||||
|
|
@ -62,11 +63,10 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO
|
|||
result = append(result, name)
|
||||
}
|
||||
}
|
||||
return dedupeNames(result), nil
|
||||
case addNames:
|
||||
result = make([]string, 0, len(opParameters)+len(oldNames))
|
||||
result = append(result, opParameters...)
|
||||
result = append(result, oldNames...)
|
||||
return dedupeNames(result), nil
|
||||
default:
|
||||
return result, errInvalidUpdateNameOperation
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@
|
|||
package mountinfo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
|
|
@ -33,13 +32,13 @@ func mountedByStat(path string) (bool, error) {
|
|||
|
||||
func normalizePath(path string) (realPath string, err error) {
|
||||
if realPath, err = filepath.Abs(path); err != nil {
|
||||
return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err)
|
||||
return "", err
|
||||
}
|
||||
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
|
||||
return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err)
|
||||
return "", err
|
||||
}
|
||||
if _, err := os.Stat(realPath); err != nil {
|
||||
return "", fmt.Errorf("failed to stat target of %q: %w", path, err)
|
||||
return "", err
|
||||
}
|
||||
return realPath, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -46,15 +46,17 @@ github.com/chzyer/readline
# github.com/containerd/cgroups v1.0.3
github.com/containerd/cgroups/stats/v1
# github.com/containerd/containerd v1.6.2
## explicit
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/userns
github.com/containerd/containerd/platforms
github.com/containerd/containerd/sys
# github.com/containerd/stargz-snapshotter/estargz v0.11.3
# github.com/containerd/stargz-snapshotter/estargz v0.11.4
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/containernetworking/cni v1.0.1
## explicit
github.com/containernetworking/cni/libcni
github.com/containernetworking/cni/pkg/invoke
github.com/containernetworking/cni/pkg/types
@ -67,7 +69,8 @@ github.com/containernetworking/cni/pkg/utils
github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.1.1
github.com/containernetworking/plugins/pkg/ns
# github.com/containers/common v0.47.5-0.20220406101255-3dd66c046c25
# github.com/containers/common v0.47.5-0.20220420095823-d822f53650b2
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/manifests
github.com/containers/common/libnetwork/cni
@ -97,7 +100,8 @@ github.com/containers/common/pkg/timetype
github.com/containers/common/pkg/umask
github.com/containers/common/pkg/util
github.com/containers/common/version
# github.com/containers/image/v5 v5.21.1-0.20220405081457-d1b64686e1d0
# github.com/containers/image/v5 v5.21.1-0.20220414071450-d2d961d5d324
## explicit
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@ -150,6 +154,7 @@ github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a
github.com/containers/libtrust
# github.com/containers/ocicrypt v1.1.3
## explicit
github.com/containers/ocicrypt
github.com/containers/ocicrypt/blockcipher
github.com/containers/ocicrypt/config
@ -166,7 +171,8 @@ github.com/containers/ocicrypt/keywrap/pkcs7
github.com/containers/ocicrypt/spec
github.com/containers/ocicrypt/utils
github.com/containers/ocicrypt/utils/keyprovider
# github.com/containers/storage v1.39.1-0.20220412073713-ea4008e14877
# github.com/containers/storage v1.39.1-0.20220419114238-1be409aec551
## explicit
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@ -218,6 +224,7 @@ github.com/davecgh/go-spew/spew
# github.com/disiqueira/gotree/v3 v3.0.2
github.com/disiqueira/gotree/v3
# github.com/docker/distribution v2.8.1+incompatible
## explicit
github.com/docker/distribution
github.com/docker/distribution/digestset
github.com/docker/distribution/metrics
@ -230,6 +237,7 @@ github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
# github.com/docker/docker v20.10.14+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
github.com/docker/docker/api/types/blkiodev
@ -268,16 +276,20 @@ github.com/docker/go-connections/tlsconfig
# github.com/docker/go-metrics v0.0.1
github.com/docker/go-metrics
# github.com/docker/go-units v0.4.0
## explicit
github.com/docker/go-units
# github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
## explicit
github.com/docker/libnetwork/resolvconf
github.com/docker/libnetwork/resolvconf/dns
github.com/docker/libnetwork/types
# github.com/fsnotify/fsnotify v1.5.1
github.com/fsnotify/fsnotify
# github.com/fsouza/go-dockerclient v1.7.10
## explicit
github.com/fsouza/go-dockerclient
# github.com/ghodss/yaml v1.0.0
## explicit
github.com/ghodss/yaml
# github.com/gogo/protobuf v1.3.2
github.com/gogo/protobuf/gogoproto
@ -300,12 +312,14 @@ github.com/gorilla/mux
# github.com/hashicorp/errwrap v1.1.0
github.com/hashicorp/errwrap
# github.com/hashicorp/go-multierror v1.1.1
## explicit
github.com/hashicorp/go-multierror
# github.com/imdario/mergo v0.3.12
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0
github.com/inconshreveable/mousetrap
# github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee
## explicit
github.com/ishidawataru/sctp
# github.com/jinzhu/copier v0.3.5
github.com/jinzhu/copier
@ -322,6 +336,7 @@ github.com/klauspost/compress/zstd/internal/xxhash
# github.com/klauspost/pgzip v1.2.5
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.3
## explicit
github.com/konsorten/go-windows-terminal-sequences
# github.com/manifoldco/promptui v0.9.0
github.com/manifoldco/promptui
@ -330,6 +345,7 @@ github.com/manifoldco/promptui/screenbuf
# github.com/mattn/go-runewidth v0.0.13
github.com/mattn/go-runewidth
# github.com/mattn/go-shellwords v1.0.12
## explicit
github.com/mattn/go-shellwords
# github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369
github.com/matttproud/golang_protobuf_extensions/pbutil
@ -339,7 +355,7 @@ github.com/miekg/pkcs11
github.com/mistifyio/go-zfs
# github.com/moby/sys/mount v0.2.0
github.com/moby/sys/mount
# github.com/moby/sys/mountinfo v0.6.0
# github.com/moby/sys/mountinfo v0.6.1
github.com/moby/sys/mountinfo
# github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
github.com/moby/term
@ -357,6 +373,7 @@ github.com/nxadm/tail/util
github.com/nxadm/tail/watch
github.com/nxadm/tail/winfile
# github.com/onsi/ginkgo v1.16.5
## explicit
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
github.com/onsi/ginkgo/formatter
@ -378,6 +395,7 @@ github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
# github.com/onsi/gomega v1.19.0
## explicit
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/gbytes
@ -391,19 +409,24 @@ github.com/onsi/gomega/matchers/support/goraph/node
github.com/onsi/gomega/matchers/support/goraph/util
github.com/onsi/gomega/types
# github.com/opencontainers/go-digest v1.0.0
## explicit
github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 => github.com/opencontainers/image-spec v1.0.2-0.20211123152302-43a7dee1ec31
## explicit
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runc v1.1.1
## explicit
github.com/opencontainers/runc/libcontainer/apparmor
github.com/opencontainers/runc/libcontainer/devices
github.com/opencontainers/runc/libcontainer/user
github.com/opencontainers/runc/libcontainer/userns
github.com/opencontainers/runc/libcontainer/utils
# github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
## explicit
github.com/opencontainers/runtime-spec/specs-go
# github.com/opencontainers/runtime-tools v0.9.0
## explicit
github.com/opencontainers/runtime-tools/error
github.com/opencontainers/runtime-tools/filepath
github.com/opencontainers/runtime-tools/generate
@ -411,11 +434,13 @@ github.com/opencontainers/runtime-tools/generate/seccomp
github.com/opencontainers/runtime-tools/specerror
github.com/opencontainers/runtime-tools/validate
# github.com/opencontainers/selinux v1.10.1
## explicit
github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalk
github.com/opencontainers/selinux/pkg/pwalkdir
# github.com/openshift/imagebuilder v1.2.3
## explicit
github.com/openshift/imagebuilder
github.com/openshift/imagebuilder/dockerclient
github.com/openshift/imagebuilder/dockerfile/command
@ -427,12 +452,14 @@ github.com/openshift/imagebuilder/strslice
github.com/ostreedev/ostree-go/pkg/glibobject
github.com/ostreedev/ostree-go/pkg/otbuiltin
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib
# github.com/proglottis/gpgme v0.1.1
github.com/proglottis/gpgme
# github.com/prometheus/client_golang v1.11.1
## explicit
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
@ -449,21 +476,27 @@ github.com/prometheus/procfs/internal/util
# github.com/rivo/uniseg v0.2.0
github.com/rivo/uniseg
# github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921
## explicit
github.com/seccomp/libseccomp-golang
# github.com/sirupsen/logrus v1.8.1 => github.com/sirupsen/logrus v1.4.2
## explicit
github.com/sirupsen/logrus
# github.com/spf13/cobra v1.4.0
## explicit
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
## explicit
github.com/spf13/pflag
# github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
github.com/stefanberger/go-pkcs11uri
# github.com/stretchr/testify v1.7.1
## explicit
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/sylabs/sif/v2 v2.4.2
# github.com/sylabs/sif/v2 v2.5.0
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit
github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tchap/go-patricia/patricia
@ -493,6 +526,7 @@ github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.2.0
github.com/xeipuuv/gojsonschema
# go.etcd.io/bbolt v1.3.6
## explicit
go.etcd.io/bbolt
# go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1
go.mozilla.org/pkcs7
@ -503,6 +537,7 @@ go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
# golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
## explicit
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
@ -534,9 +569,11 @@ golang.org/x/net/idna
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/execabs
golang.org/x/sys/internal/unsafeheader
@ -544,6 +581,7 @@ golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
# golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
## explicit
golang.org/x/term
# golang.org/x/text v0.3.7
golang.org/x/text/encoding
@ -657,3 +695,5 @@ gopkg.in/yaml.v2
gopkg.in/yaml.v3
# k8s.io/klog v1.0.0
k8s.io/klog
# github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.4.2
# github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.2-0.20211123152302-43a7dee1ec31