[release-1.35] CVE-2024-3727 updates

Bump c/image to v5.30.1 and c/common to v0.58.3 to address:

CVE-2024-3727
https://issues.redhat.com/browse/RHEL-35438
https://issues.redhat.com/browse/RHEL-35441

[NO NEW TESTS NEEDED]

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
This commit is contained in:
tomsweeneyredhat 2024-05-09 15:19:25 -04:00
parent d1d1b54864
commit f905965162
31 changed files with 445 additions and 165 deletions

4
go.mod
View File

@ -5,8 +5,8 @@ go 1.20
require ( require (
github.com/containerd/containerd v1.7.13 github.com/containerd/containerd v1.7.13
github.com/containernetworking/cni v1.1.2 github.com/containernetworking/cni v1.1.2
github.com/containers/common v0.58.1 github.com/containers/common v0.58.3
github.com/containers/image/v5 v5.30.0 github.com/containers/image/v5 v5.30.1
github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c
github.com/containers/ocicrypt v1.1.10 github.com/containers/ocicrypt v1.1.10
github.com/containers/storage v1.53.0 github.com/containers/storage v1.53.0

8
go.sum
View File

@ -58,10 +58,10 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic= github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0= github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
github.com/containers/common v0.58.1 h1:E1DN9Lr7kgMVQy7AXLv1CYQCiqnweklMiYWbf0KOnqY= github.com/containers/common v0.58.3 h1:Iy/CdYjluEK926QT+ejonz7YvoRHazeW7BAiLIkmUQ4=
github.com/containers/common v0.58.1/go.mod h1:l3vMqanJGj7tZ3W/i76gEJ128VXgFUO1tLaohJXPvdk= github.com/containers/common v0.58.3/go.mod h1:p4V1SNk+WOISgp01m+axuqCUxaDP3WSZPPzvnJnS/cQ=
github.com/containers/image/v5 v5.30.0 h1:CmHeSwI6W2kTRWnUsxATDFY5TEX4b58gPkaQcEyrLIA= github.com/containers/image/v5 v5.30.1 h1:AKrQMgOKI1oKx5FW5eoU2xoNyzACajHGx1O3qxobvFM=
github.com/containers/image/v5 v5.30.0/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk= github.com/containers/image/v5 v5.30.1/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c h1:6zalnZZODMOqNZBww9VAM1Mq5EZ3J+S8vYGCo2yg39M= github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c h1:6zalnZZODMOqNZBww9VAM1Mq5EZ3J+S8vYGCo2yg39M=

View File

@ -100,18 +100,37 @@ func (n *Netns) getOrCreateNetns() (ns.NetNS, bool, error) {
nsPath := n.getPath(rootlessNetnsDir) nsPath := n.getPath(rootlessNetnsDir)
nsRef, err := ns.GetNS(nsPath) nsRef, err := ns.GetNS(nsPath)
if err == nil { if err == nil {
// TODO check if slirp4netns is alive pidPath := n.getPath(rootlessNetNsConnPidFile)
return nsRef, false, nil pid, err := readPidFile(pidPath)
} if err == nil {
logrus.Debugf("Creating rootless network namespace at %q", nsPath) // quick check if pasta/slirp4netns are still running
// We have to create the netns dir again here because it is possible err := unix.Kill(pid, 0)
// that cleanup() removed it. if err == nil {
if err := os.MkdirAll(n.dir, 0o700); err != nil { // All good, return the netns.
return nil, false, wrapError("", err) return nsRef, false, nil
} }
netns, err := netns.NewNSAtPath(nsPath) // Print warnings in case things went wrong, we might be able to recover
if err != nil { // but maybe not so make sure to leave some hints so we can figure out what went wrong.
return nil, false, wrapError("create netns", err) if errors.Is(err, unix.ESRCH) {
logrus.Warn("rootless netns program no longer running, trying to start it again")
} else {
logrus.Warnf("failed to check if rootless netns program is running: %v, trying to start it again", err)
}
} else {
logrus.Warnf("failed to read rootless netns program pid: %v", err)
}
// In case of errors continue and setup the network cmd again.
} else {
logrus.Debugf("Creating rootless network namespace at %q", nsPath)
// We have to create the netns dir again here because it is possible
// that cleanup() removed it.
if err := os.MkdirAll(n.dir, 0o700); err != nil {
return nil, false, wrapError("", err)
}
nsRef, err = netns.NewNSAtPath(nsPath)
if err != nil {
return nil, false, wrapError("create netns", err)
}
} }
switch strings.ToLower(n.config.Network.DefaultRootlessNetworkCmd) { switch strings.ToLower(n.config.Network.DefaultRootlessNetworkCmd) {
case "", slirp4netns.BinaryName: case "", slirp4netns.BinaryName:
@ -121,7 +140,17 @@ func (n *Netns) getOrCreateNetns() (ns.NetNS, bool, error) {
default: default:
err = fmt.Errorf("invalid rootless network command %q", n.config.Network.DefaultRootlessNetworkCmd) err = fmt.Errorf("invalid rootless network command %q", n.config.Network.DefaultRootlessNetworkCmd)
} }
return netns, true, err // If pasta or slirp4netns fail here we need to get rid of the netns again to not leak it,
// otherwise the next command thinks the netns was successfully setup.
if err != nil {
if nerr := netns.UnmountNS(nsPath); nerr != nil {
logrus.Error(nerr)
}
_ = nsRef.Close()
return nil, false, err
}
return nsRef, true, nil
} }
func (n *Netns) cleanup() error { func (n *Netns) cleanup() error {
@ -165,11 +194,7 @@ func (n *Netns) setupPasta(nsPath string) error {
if systemd.RunsOnSystemd() { if systemd.RunsOnSystemd() {
// Treat these as fatal - if pasta failed to write a PID file something is probably wrong. // Treat these as fatal - if pasta failed to write a PID file something is probably wrong.
pidfile, err := os.ReadFile(pidPath) pid, err := readPidFile(pidPath)
if err != nil {
return fmt.Errorf("unable to open pasta PID file: %w", err)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidfile)))
if err != nil { if err != nil {
return fmt.Errorf("unable to decode pasta PID: %w", err) return fmt.Errorf("unable to decode pasta PID: %w", err)
} }
@ -245,16 +270,12 @@ func (n *Netns) setupSlirp4netns(nsPath string) error {
func (n *Netns) cleanupRootlessNetns() error { func (n *Netns) cleanupRootlessNetns() error {
pidFile := n.getPath(rootlessNetNsConnPidFile) pidFile := n.getPath(rootlessNetNsConnPidFile)
b, err := os.ReadFile(pidFile) pid, err := readPidFile(pidFile)
if err == nil { if err == nil {
var i int // kill the slirp/pasta process so we do not leak it
i, err = strconv.Atoi(strings.TrimSpace(string(b))) err = unix.Kill(pid, unix.SIGTERM)
if err == nil { if err == unix.ESRCH {
// kill the slirp process so we do not leak it err = nil
err = unix.Kill(i, unix.SIGTERM)
if err == unix.ESRCH {
err = nil
}
} }
} }
return err return err
@ -294,6 +315,13 @@ func (n *Netns) setupMounts() error {
return wrapError("create new mount namespace", err) return wrapError("create new mount namespace", err)
} }
// Ensure we mount private in our mountns to prevent accidentally
// overwriting the host mounts in case the default propagation is shared.
err = unix.Mount("", "/", "", unix.MS_PRIVATE|unix.MS_REC, "")
if err != nil {
return wrapError("make tree private in new mount namespace", err)
}
xdgRuntimeDir, err := homedir.GetRuntimeDir() xdgRuntimeDir, err := homedir.GetRuntimeDir()
if err != nil { if err != nil {
return fmt.Errorf("could not get runtime directory: %w", err) return fmt.Errorf("could not get runtime directory: %w", err)
@ -301,7 +329,7 @@ func (n *Netns) setupMounts() error {
newXDGRuntimeDir := n.getPath(xdgRuntimeDir) newXDGRuntimeDir := n.getPath(xdgRuntimeDir)
// 1. Mount the netns into the new run to keep them accessible. // 1. Mount the netns into the new run to keep them accessible.
// Otherwise cni setup will fail because it cannot access the netns files. // Otherwise cni setup will fail because it cannot access the netns files.
err = mountAndMkdirDest(xdgRuntimeDir, newXDGRuntimeDir, none, unix.MS_BIND|unix.MS_SHARED|unix.MS_REC) err = mountAndMkdirDest(xdgRuntimeDir, newXDGRuntimeDir, none, unix.MS_BIND|unix.MS_REC)
if err != nil { if err != nil {
return err return err
} }
@ -556,15 +584,12 @@ func (n *Netns) Run(lock *lockfile.LockFile, toRun func() error) error {
logrus.Errorf("Failed to decrement ref count: %v", err) logrus.Errorf("Failed to decrement ref count: %v", err)
return inErr return inErr
} }
if count == 0 { // runInner() already cleans up the netns when it created a new one on errors
// so we only need to do that if there was no error.
if inErr == nil && count == 0 {
err = n.cleanup() err = n.cleanup()
if err != nil { if err != nil {
err = wrapError("cleanup", err) return wrapError("cleanup", err)
if inErr == nil {
return err
}
logrus.Errorf("Failed to cleanup rootless netns: %v", err)
return inErr
} }
} }
@ -599,3 +624,11 @@ func refCount(dir string, inc int) (int, error) {
return currentCount, nil return currentCount, nil
} }
func readPidFile(path string) (int, error) {
b, err := os.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.Atoi(strings.TrimSpace(string(b)))
}

View File

@ -376,6 +376,11 @@ func (n *netavarkNetwork) NetworkRemove(nameOrID string) error {
return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork) return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork)
} }
// remove the ipam bucket for this network
if err := n.removeNetworkIPAMBucket(network); err != nil {
return err
}
file := filepath.Join(n.networkConfigDir, network.Name+".json") file := filepath.Join(n.networkConfigDir, network.Name+".json")
// make sure to not error for ErrNotExist // make sure to not error for ErrNotExist
if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) { if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) {

View File

@ -4,6 +4,7 @@ package netavark
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"net" "net"
@ -357,6 +358,26 @@ func (n *netavarkNetwork) deallocIPs(opts *types.NetworkOptions) error {
return err return err
} }
func (n *netavarkNetwork) removeNetworkIPAMBucket(network *types.Network) error {
if !requiresIPAMAlloc(network) {
return nil
}
db, err := n.openDB()
if err != nil {
return err
}
defer db.Close()
return db.Update(func(tx *bbolt.Tx) error {
// Ignore ErrBucketNotFound, can happen if the network never allocated any ips,
// i.e. because no container was started.
if err := tx.DeleteBucket([]byte(network.Name)); err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
return err
}
return nil
})
}
// requiresIPAMAlloc return true when we have to allocate ips for this network // requiresIPAMAlloc return true when we have to allocate ips for this network
// it checks the ipam driver and if subnets are set // it checks the ipam driver and if subnets are set
func requiresIPAMAlloc(network *types.Network) bool { func requiresIPAMAlloc(network *types.Network) bool {

View File

@ -135,7 +135,11 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
} }
var netns *rootlessnetns.Netns var netns *rootlessnetns.Netns
if unshare.IsRootless() { // Do not use unshare.IsRootless() here. We only care if we are running re-exec in the userns,
// IsRootless() also returns true if we are root in a userns which is not what we care about and
// causes issues as this slower more complicated rootless-netns logic should not be used as root.
_, useRootlessNetns := os.LookupEnv(unshare.UsernsEnvName)
if useRootlessNetns {
netns, err = rootlessnetns.New(conf.NetworkRunDir, rootlessnetns.Netavark, conf.Config) netns, err = rootlessnetns.New(conf.NetworkRunDir, rootlessnetns.Netavark, conf.Config)
if err != nil { if err != nil {
return nil, err return nil, err
@ -147,7 +151,7 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
networkRunDir: conf.NetworkRunDir, networkRunDir: conf.NetworkRunDir,
netavarkBinary: conf.NetavarkBinary, netavarkBinary: conf.NetavarkBinary,
aardvarkBinary: conf.AardvarkBinary, aardvarkBinary: conf.AardvarkBinary,
networkRootless: unshare.IsRootless(), networkRootless: useRootlessNetns,
ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"), ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"),
firewallDriver: conf.Config.Network.FirewallDriver, firewallDriver: conf.Config.Network.FirewallDriver,
defaultNetwork: defaultNetworkName, defaultNetwork: defaultNetworkName,

View File

@ -1,4 +1,4 @@
package version package version
// Version is the version of the build. // Version is the version of the build.
const Version = "0.58.1" const Version = "0.58.3"

View File

@ -49,10 +49,13 @@ type progressBar struct {
// As a convention, most users of progress bars should call mark100PercentComplete on full success; // As a convention, most users of progress bars should call mark100PercentComplete on full success;
// by convention, we don't leave progress bars in partial state when fully done // by convention, we don't leave progress bars in partial state when fully done
// (even if we copied much less data than anticipated). // (even if we copied much less data than anticipated).
func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar { func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) (*progressBar, error) {
// shortDigestLen is the length of the digest used for blobs. // shortDigestLen is the length of the digest used for blobs.
const shortDigestLen = 12 const shortDigestLen = 12
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return nil, err
}
prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
// Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column. // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
maxPrefixLen := len("Copying blob ") + shortDigestLen maxPrefixLen := len("Copying blob ") + shortDigestLen
@ -105,7 +108,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
return &progressBar{ return &progressBar{
Bar: bar, Bar: bar,
originalSize: info.Size, originalSize: info.Size,
} }, nil
} }
// printCopyInfo prints a "Copying ..." message on the copier if the output is // printCopyInfo prints a "Copying ..." message on the copier if the output is

View File

@ -606,7 +606,10 @@ func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
progressPool := ic.c.newProgressPool() progressPool := ic.c.newProgressPool()
defer progressPool.Wait() defer progressPool.Wait()
bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done") bar, err := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
if err != nil {
return types.BlobInfo{}, err
}
defer bar.Abort(false) defer bar.Abort(false)
ic.c.printCopyInfo("config", srcInfo) ic.c.printCopyInfo("config", srcInfo)
@ -738,15 +741,21 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
} }
if reused { if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
func() { // A scope for defer if err := func() error { // A scope for defer
label := "skipped: already exists" label := "skipped: already exists"
if reusedBlob.MatchedByTOCDigest { if reusedBlob.MatchedByTOCDigest {
label = "skipped: already exists (found by TOC)" label = "skipped: already exists (found by TOC)"
} }
bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label) bar, err := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label)
if err != nil {
return err
}
defer bar.Abort(false) defer bar.Abort(false)
bar.mark100PercentComplete() bar.mark100PercentComplete()
}() return nil
}(); err != nil {
return types.BlobInfo{}, "", err
}
// Throw an event that the layer has been skipped // Throw an event that the layer has been skipped
if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 { if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
@ -765,8 +774,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// Attempt a partial only when the source allows to retrieve a blob partially and // Attempt a partial only when the source allows to retrieve a blob partially and
// the destination has support for it. // the destination has support for it.
if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() { if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer reused, blobInfo, err := func() (bool, types.BlobInfo, error) { // A scope for defer
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") bar, err := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
if err != nil {
return false, types.BlobInfo{}, err
}
hideProgressBar := true hideProgressBar := true
defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily. defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
bar.Abort(hideProgressBar) bar.Abort(hideProgressBar)
@ -789,18 +801,25 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
bar.mark100PercentComplete() bar.mark100PercentComplete()
hideProgressBar = false hideProgressBar = false
logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob) return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil
} }
logrus.Debugf("Failed to retrieve partial blob: %v", err) logrus.Debugf("Failed to retrieve partial blob: %v", err)
return false, types.BlobInfo{} return false, types.BlobInfo{}, nil
}(); reused { }()
if err != nil {
return types.BlobInfo{}, "", err
}
if reused {
return blobInfo, cachedDiffID, nil return blobInfo, cachedDiffID, nil
} }
} }
// Fallback: copy the layer, computing the diffID if we need to do so // Fallback: copy the layer, computing the diffID if we need to do so
return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") bar, err := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
if err != nil {
return types.BlobInfo{}, "", err
}
defer bar.Abort(false) defer bar.Abort(false)
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)

View File

@ -173,7 +173,10 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
} }
} }
blobPath := d.ref.layerPath(blobDigest) blobPath, err := d.ref.layerPath(blobDigest)
if err != nil {
return private.UploadedBlob{}, err
}
// need to explicitly close the file, since a rename won't otherwise not work on Windows // need to explicitly close the file, since a rename won't otherwise not work on Windows
blobFile.Close() blobFile.Close()
explicitClosed = true explicitClosed = true
@ -196,7 +199,10 @@ func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, inf
if info.Digest == "" { if info.Digest == "" {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest") return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
} }
blobPath := d.ref.layerPath(info.Digest) blobPath, err := d.ref.layerPath(info.Digest)
if err != nil {
return false, private.ReusedBlob{}, err
}
finfo, err := os.Stat(blobPath) finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) { if err != nil && os.IsNotExist(err) {
return false, private.ReusedBlob{}, nil return false, private.ReusedBlob{}, nil
@ -216,7 +222,11 @@ func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, inf
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error { func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) path, err := d.ref.manifestPath(instanceDigest)
if err != nil {
return err
}
return os.WriteFile(path, manifest, 0644)
} }
// PutSignaturesWithFormat writes a set of signatures to the destination. // PutSignaturesWithFormat writes a set of signatures to the destination.
@ -229,7 +239,11 @@ func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signa
if err != nil { if err != nil {
return err return err
} }
if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil { path, err := d.ref.signaturePath(i, instanceDigest)
if err != nil {
return err
}
if err := os.WriteFile(path, blob, 0644); err != nil {
return err return err
} }
} }

View File

@ -55,7 +55,11 @@ func (s *dirImageSource) Close() error {
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
m, err := os.ReadFile(s.ref.manifestPath(instanceDigest)) path, err := s.ref.manifestPath(instanceDigest)
if err != nil {
return nil, "", err
}
m, err := os.ReadFile(path)
if err != nil { if err != nil {
return nil, "", err return nil, "", err
} }
@ -66,7 +70,11 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest)) path, err := s.ref.layerPath(info.Digest)
if err != nil {
return nil, -1, err
}
r, err := os.Open(path)
if err != nil { if err != nil {
return nil, -1, err return nil, -1, err
} }
@ -84,7 +92,10 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
signatures := []signature.Signature{} signatures := []signature.Signature{}
for i := 0; ; i++ { for i := 0; ; i++ {
path := s.ref.signaturePath(i, instanceDigest) path, err := s.ref.signaturePath(i, instanceDigest)
if err != nil {
return nil, err
}
sigBlob, err := os.ReadFile(path) sigBlob, err := os.ReadFile(path)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {

View File

@ -161,25 +161,34 @@ func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContex
} }
// manifestPath returns a path for the manifest within a directory using our conventions. // manifestPath returns a path for the manifest within a directory using our conventions.
func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string { func (ref dirReference) manifestPath(instanceDigest *digest.Digest) (string, error) {
if instanceDigest != nil { if instanceDigest != nil {
return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json") if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
return "", err
}
return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json"), nil
} }
return filepath.Join(ref.path, "manifest.json") return filepath.Join(ref.path, "manifest.json"), nil
} }
// layerPath returns a path for a layer tarball within a directory using our conventions. // layerPath returns a path for a layer tarball within a directory using our conventions.
func (ref dirReference) layerPath(digest digest.Digest) string { func (ref dirReference) layerPath(digest digest.Digest) (string, error) {
if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
return "", err
}
// FIXME: Should we keep the digest identification? // FIXME: Should we keep the digest identification?
return filepath.Join(ref.path, digest.Encoded()) return filepath.Join(ref.path, digest.Encoded()), nil
} }
// signaturePath returns a path for a signature within a directory using our conventions. // signaturePath returns a path for a signature within a directory using our conventions.
func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string { func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) (string, error) {
if instanceDigest != nil { if instanceDigest != nil {
return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1)) if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
return "", err
}
return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1)), nil
} }
return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)), nil
} }
// versionPath returns a path for the version file within a directory using our conventions. // versionPath returns a path for the version file within a directory using our conventions.

View File

@ -952,6 +952,8 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return c.detectPropertiesError return c.detectPropertiesError
} }
// fetchManifest fetches a manifest for (the repo of ref) + tagOrDigest.
// The caller is responsible for ensuring tagOrDigest uses the expected format.
func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) { func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) {
path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest) path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest)
headers := map[string][]string{ headers := map[string][]string{
@ -1035,6 +1037,9 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
} }
} }
if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
return nil, 0, err
}
path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String()) path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path) logrus.Debugf("Downloading %s", path)
res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
@ -1098,7 +1103,10 @@ func isManifestUnknownError(err error) bool {
// digest in ref. // digest in ref.
// It returns (nil, nil) if the manifest does not exist. // It returns (nil, nil) if the manifest does not exist.
func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) { func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) {
tag := sigstoreAttachmentTag(digest) tag, err := sigstoreAttachmentTag(digest)
if err != nil {
return nil, err
}
sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag) sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag)
if err != nil { if err != nil {
return nil, err return nil, err
@ -1131,6 +1139,9 @@ func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref do
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures. // using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
if err := manifestDigest.Validate(); err != nil { // Make sure manifestDigest.String() does not contain any unexpected characters
return nil, err
}
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil { if err != nil {
@ -1154,8 +1165,11 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
} }
// sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest. // sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest.
func sigstoreAttachmentTag(d digest.Digest) string { func sigstoreAttachmentTag(d digest.Digest) (string, error) {
return strings.Replace(d.String(), ":", "-", 1) + ".sig" if err := d.Validate(); err != nil { // Make sure d.String() doesnt contain any unexpected characters
return "", err
}
return strings.Replace(d.String(), ":", "-", 1) + ".sig", nil
} }
// Close removes resources associated with an initialized dockerClient, if any. // Close removes resources associated with an initialized dockerClient, if any.

View File

@ -88,7 +88,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil {
return nil, err return nil, err
} }
tags = append(tags, tagsHolder.Tags...) for _, tag := range tagsHolder.Tags {
if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err)
}
tags = append(tags, tag)
}
link := res.Header.Get("Link") link := res.Header.Get("Link")
if link == "" { if link == "" {

View File

@ -230,6 +230,9 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); // If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure. // it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) { func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters
return false, -1, err
}
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String()) checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath) logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope) res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
@ -469,6 +472,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
// particular instance. // particular instance.
refTail = instanceDigest.String() refTail = instanceDigest.String()
// Double-check that the manifest we've been given matches the digest we've been given. // Double-check that the manifest we've been given matches the digest we've been given.
// This also validates the format of instanceDigest.
matches, err := manifest.MatchesDigest(m, *instanceDigest) matches, err := manifest.MatchesDigest(m, *instanceDigest)
if err != nil { if err != nil {
return fmt.Errorf("digesting manifest in PutManifest: %w", err) return fmt.Errorf("digesting manifest in PutManifest: %w", err)
@ -635,11 +639,13 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature
// NOTE: Keep this in sync with docs/signature-protocols.md! // NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures { for i, signature := range signatures {
sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
err := d.putOneSignature(sigURL, signature)
if err != nil { if err != nil {
return err return err
} }
if err := d.putOneSignature(sigURL, signature); err != nil {
return err
}
} }
// Remove any other signatures, if present. // Remove any other signatures, if present.
// We stop at the first missing signature; if a previous deleting loop aborted // We stop at the first missing signature; if a previous deleting loop aborted
@ -647,7 +653,10 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature
// is enough for dockerImageSource to stop looking for other signatures, so that // is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient. // is sufficient.
for i := len(signatures); ; i++ { for i := len(signatures); ; i++ {
sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
if err != nil {
return err
}
missing, err := d.c.deleteOneSignature(sigURL) missing, err := d.c.deleteOneSignature(sigURL)
if err != nil { if err != nil {
return err return err
@ -778,8 +787,12 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
if err != nil { if err != nil {
return err return err
} }
attachmentTag, err := sigstoreAttachmentTag(manifestDigest)
if err != nil {
return err
}
logrus.Debugf("Uploading sigstore attachment manifest") logrus.Debugf("Uploading sigstore attachment manifest")
return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest)) return d.uploadManifest(ctx, manifestBlob, attachmentTag)
} }
func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string, func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
@ -895,6 +908,7 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
return err return err
} }
// manifestDigest is known to be valid because it was not rejected by getExtensionsSignatures above.
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String()) path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil) res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
if err != nil { if err != nil {

View File

@ -194,6 +194,9 @@ func simplifyContentType(contentType string) string {
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil { if instanceDigest != nil {
if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
return nil, "", err
}
return s.fetchManifest(ctx, instanceDigest.String()) return s.fetchManifest(ctx, instanceDigest.String())
} }
err := s.ensureManifestIsLoaded(ctx) err := s.ensureManifestIsLoaded(ctx)
@ -203,6 +206,8 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
return s.cachedManifest, s.cachedManifestMIMEType, nil return s.cachedManifest, s.cachedManifestMIMEType, nil
} }
// fetchManifest fetches a manifest for tagOrDigest.
// The caller is responsible for ensuring tagOrDigest uses the expected format.
func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest) return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest)
} }
@ -352,6 +357,9 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt") return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt")
} }
if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
return nil, nil, err
}
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path) logrus.Debugf("Downloading %s", path)
res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
@ -462,7 +470,10 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst
return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
} }
sigURL := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
if err != nil {
return nil, err
}
signature, missing, err := s.getOneSignature(ctx, sigURL) signature, missing, err := s.getOneSignature(ctx, sigURL)
if err != nil { if err != nil {
return nil, err return nil, err
@ -660,7 +671,10 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
} }
for i := 0; ; i++ { for i := 0; ; i++ {
sigURL := lookasideStorageURL(c.signatureBase, manifestDigest, i) sigURL, err := lookasideStorageURL(c.signatureBase, manifestDigest, i)
if err != nil {
return err
}
missing, err := c.deleteOneSignature(sigURL) missing, err := c.deleteOneSignature(sigURL)
if err != nil { if err != nil {
return err return err

View File

@ -111,11 +111,19 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err) return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
} }
d.config = buf d.config = buf
if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil { configPath, err := d.archive.configPath(inputInfo.Digest)
if err != nil {
return private.UploadedBlob{}, err
}
if err := d.archive.sendFileLocked(configPath, inputInfo.Size, bytes.NewReader(buf)); err != nil {
return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err) return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
} }
} else { } else {
if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil { layerPath, err := d.archive.physicalLayerPath(inputInfo.Digest)
if err != nil {
return private.UploadedBlob{}, err
}
if err := d.archive.sendFileLocked(layerPath, inputInfo.Size, stream); err != nil {
return private.UploadedBlob{}, err return private.UploadedBlob{}, err
} }
} }

View File

@ -95,7 +95,10 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
if !w.legacyLayers.Contains(layerID) { if !w.legacyLayers.Contains(layerID) {
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image"). // Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
// See also the comment in physicalLayerPath. // See also the comment in physicalLayerPath.
physicalLayerPath := w.physicalLayerPath(layerDigest) physicalLayerPath, err := w.physicalLayerPath(layerDigest)
if err != nil {
return err
}
if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil { if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
return fmt.Errorf("creating layer symbolic link: %w", err) return fmt.Errorf("creating layer symbolic link: %w", err)
} }
@ -139,6 +142,9 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
} }
// This chainID value matches the computation in docker/docker/layer.CreateChainID … // This chainID value matches the computation in docker/docker/layer.CreateChainID …
if err := l.Digest.Validate(); err != nil { // This should never fail on this code path, still: make sure the chainID computation is unambiguous.
return err
}
if chainID == "" { if chainID == "" {
chainID = l.Digest chainID = l.Digest
} else { } else {
@ -204,12 +210,20 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error { func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
layerPaths := []string{} layerPaths := []string{}
for _, l := range layerDescriptors { for _, l := range layerDescriptors {
layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest)) p, err := w.physicalLayerPath(l.Digest)
if err != nil {
return err
}
layerPaths = append(layerPaths, p)
} }
var item *ManifestItem var item *ManifestItem
configPath, err := w.configPath(configDigest)
if err != nil {
return err
}
newItem := ManifestItem{ newItem := ManifestItem{
Config: w.configPath(configDigest), Config: configPath,
RepoTags: []string{}, RepoTags: []string{},
Layers: layerPaths, Layers: layerPaths,
Parent: "", // We dont have this information Parent: "", // We dont have this information
@ -294,21 +308,27 @@ func (w *Writer) Close() error {
// configPath returns a path we choose for storing a config with the specified digest. // configPath returns a path we choose for storing a config with the specified digest.
// NOTE: This is an internal implementation detail, not a format property, and can change // NOTE: This is an internal implementation detail, not a format property, and can change
// any time. // any time.
func (w *Writer) configPath(configDigest digest.Digest) string { func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
return configDigest.Hex() + ".json" if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
return configDigest.Hex() + ".json", nil
} }
// physicalLayerPath returns a path we choose for storing a layer with the specified digest // physicalLayerPath returns a path we choose for storing a layer with the specified digest
// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format). // (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
// NOTE: This is an internal implementation detail, not a format property, and can change // NOTE: This is an internal implementation detail, not a format property, and can change
// any time. // any time.
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string { func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described // writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load) // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball. // in the root of the tarball.
return layerDigest.Hex() + ".tar" return layerDigest.Hex() + ".tar", nil
} }
type tarFI struct { type tarFI struct {

View File

@ -286,8 +286,11 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
// lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest. // lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
// base is not nil from the caller // base is not nil from the caller
// NOTE: Keep this in sync with docs/signature-protocols.md! // NOTE: Keep this in sync with docs/signature-protocols.md!
func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL { func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
return nil, err
}
sigURL := *base sigURL := *base
sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
return &sigURL return &sigURL, nil
} }

View File

@ -109,6 +109,9 @@ func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, inst
} }
imageStreamImageName = s.imageStreamImageName imageStreamImageName = s.imageStreamImageName
} else { } else {
if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
return nil, err
}
imageStreamImageName = instanceDigest.String() imageStreamImageName = instanceDigest.String()
} }
image, err := s.client.getImage(ctx, imageStreamImageName) image, err := s.client.getImage(ctx, imageStreamImageName)

View File

@ -345,6 +345,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
} }
d.repo = repo d.repo = repo
} }
if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
return false, private.ReusedBlob{}, err
}
branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest") found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
@ -470,12 +474,18 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
return nil return nil
} }
for _, layer := range d.schema.LayersDescriptors { for _, layer := range d.schema.LayersDescriptors {
if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
hash := layer.Digest.Hex() hash := layer.Digest.Hex()
if err = checkLayer(hash); err != nil { if err = checkLayer(hash); err != nil {
return err return err
} }
} }
for _, layer := range d.schema.FSLayers { for _, layer := range d.schema.FSLayers {
if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
hash := layer.BlobSum.Hex() hash := layer.BlobSum.Hex()
if err = checkLayer(hash); err != nil { if err = checkLayer(hash); err != nil {
return err return err

View File

@ -286,7 +286,9 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return nil, -1, err
}
blob := info.Digest.Hex() blob := info.Digest.Hex()
// Ensure s.compressed is initialized. It is build by LayerInfosForCopy. // Ensure s.compressed is initialized. It is build by LayerInfosForCopy.

View File

@ -78,12 +78,15 @@ func (b *BlobCache) DeleteImage(ctx context.Context, sys *types.SystemContext) e
} }
// blobPath returns the path appropriate for storing a blob with digest. // blobPath returns the path appropriate for storing a blob with digest.
func (b *BlobCache) blobPath(digest digest.Digest, isConfig bool) string { func (b *BlobCache) blobPath(digest digest.Digest, isConfig bool) (string, error) {
if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters
return "", err
}
baseName := digest.String() baseName := digest.String()
if isConfig { if isConfig {
baseName += ".config" baseName += ".config"
} }
return filepath.Join(b.directory, baseName) return filepath.Join(b.directory, baseName), nil
} }
// findBlob checks if we have a blob for info in cache (whether a config or not) // findBlob checks if we have a blob for info in cache (whether a config or not)
@ -95,7 +98,10 @@ func (b *BlobCache) findBlob(info types.BlobInfo) (string, int64, bool, error) {
} }
for _, isConfig := range []bool{false, true} { for _, isConfig := range []bool{false, true} {
path := b.blobPath(info.Digest, isConfig) path, err := b.blobPath(info.Digest, isConfig)
if err != nil {
return "", -1, false, err
}
fileInfo, err := os.Stat(path) fileInfo, err := os.Stat(path)
if err == nil && (info.Size == -1 || info.Size == fileInfo.Size()) { if err == nil && (info.Size == -1 || info.Size == fileInfo.Size()) {
return path, fileInfo.Size(), isConfig, nil return path, fileInfo.Size(), isConfig, nil

View File

@ -79,47 +79,58 @@ func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool {
// and this new file. // and this new file.
func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) {
defer wg.Done() defer wg.Done()
// Decompress from and digest the reading end of that pipe. defer decompressReader.Close()
decompressed, err3 := archive.DecompressStream(decompressReader)
succeeded := false
defer func() {
if !succeeded {
// Remove the temporary file.
if err := os.Remove(tempFile.Name()); err != nil {
logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err)
}
}
}()
digester := digest.Canonical.Digester() digester := digest.Canonical.Digester()
if err3 == nil { if err := func() error { // A scope for defer
defer tempFile.Close()
// Decompress from and digest the reading end of that pipe.
decompressed, err := archive.DecompressStream(decompressReader)
if err != nil {
// Drain the pipe to keep from stalling the PutBlob() thread.
if _, err2 := io.Copy(io.Discard, decompressReader); err2 != nil {
logrus.Debugf("error draining the pipe: %v", err2)
}
return err
}
defer decompressed.Close()
// Read the decompressed data through the filter over the pipe, blocking until the // Read the decompressed data through the filter over the pipe, blocking until the
// writing end is closed. // writing end is closed.
_, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) _, err = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
} else { return err
// Drain the pipe to keep from stalling the PutBlob() thread. }(); err != nil {
if _, err := io.Copy(io.Discard, decompressReader); err != nil { return
logrus.Debugf("error draining the pipe: %v", err)
}
} }
decompressReader.Close()
decompressed.Close()
tempFile.Close()
// Determine the name that we should give to the uncompressed copy of the blob. // Determine the name that we should give to the uncompressed copy of the blob.
decompressedFilename := d.reference.blobPath(digester.Digest(), isConfig) decompressedFilename, err := d.reference.blobPath(digester.Digest(), isConfig)
if err3 == nil { if err != nil {
// Rename the temporary file. return
if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { }
logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) // Rename the temporary file.
// Remove the temporary file. if err := os.Rename(tempFile.Name(), decompressedFilename); err != nil {
if err3 = os.Remove(tempFile.Name()); err3 != nil { logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err)
logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) return
} }
} else { succeeded = true
*alternateDigest = digester.Digest() *alternateDigest = digester.Digest()
// Note the relationship between the two files. // Note the relationship between the two files.
if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { if err := ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err != nil {
logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err)
} }
if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { if err := ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err != nil {
logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err)
}
}
} else {
// Remove the temporary file.
if err3 = os.Remove(tempFile.Name()); err3 != nil {
logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
}
} }
} }
@ -144,7 +155,10 @@ func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io
needToWait := false needToWait := false
compression := archive.Uncompressed compression := archive.Uncompressed
if inputInfo.Digest != "" { if inputInfo.Digest != "" {
filename := d.reference.blobPath(inputInfo.Digest, options.IsConfig) filename, err2 := d.reference.blobPath(inputInfo.Digest, options.IsConfig)
if err2 != nil {
return private.UploadedBlob{}, err2
}
tempfile, err = os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) tempfile, err = os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
if err == nil { if err == nil {
stream = io.TeeReader(stream, tempfile) stream = io.TeeReader(stream, tempfile)
@ -273,7 +287,10 @@ func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []
if err != nil { if err != nil {
logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
} else { } else {
filename := d.reference.blobPath(manifestDigest, false) filename, err := d.reference.blobPath(manifestDigest, false)
if err != nil {
return err
}
if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
logrus.Warnf("error saving manifest as %q: %v", filename, err) logrus.Warnf("error saving manifest as %q: %v", filename, err)
} }

View File

@ -56,7 +56,10 @@ func (s *blobCacheSource) Close() error {
func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil { if instanceDigest != nil {
filename := s.reference.blobPath(*instanceDigest, false) filename, err := s.reference.blobPath(*instanceDigest, false)
if err != nil {
return nil, "", err
}
manifestBytes, err := os.ReadFile(filename) manifestBytes, err := os.ReadFile(filename)
if err == nil { if err == nil {
s.cacheHits++ s.cacheHits++
@ -136,8 +139,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
replacedInfos := make([]types.BlobInfo, 0, len(infos)) replacedInfos := make([]types.BlobInfo, 0, len(infos))
for _, info := range infos { for _, info := range infos {
var replaceDigest []byte var replaceDigest []byte
var err error blobFile, err := s.reference.blobPath(info.Digest, false)
blobFile := s.reference.blobPath(info.Digest, false) if err != nil {
return nil, err
}
var alternate string var alternate string
switch s.reference.compress { switch s.reference.compress {
case types.Compress: case types.Compress:
@ -148,7 +153,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
replaceDigest, err = os.ReadFile(alternate) replaceDigest, err = os.ReadFile(alternate)
} }
if err == nil && digest.Digest(replaceDigest).Validate() == nil { if err == nil && digest.Digest(replaceDigest).Validate() == nil {
alternate = s.reference.blobPath(digest.Digest(replaceDigest), false) alternate, err = s.reference.blobPath(digest.Digest(replaceDigest), false)
if err != nil {
return nil, err
}
fileInfo, err := os.Stat(alternate) fileInfo, err := os.Stat(alternate)
if err == nil { if err == nil {
switch info.MediaType { switch info.MediaType {

View File

@ -361,6 +361,18 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata. // tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer(). // The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if blobDigest == "" {
return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
}
if err := blobDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
if options.TOCDigest != "" {
if err := options.TOCDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
}
// lock the entire method as it executes fairly quickly // lock the entire method as it executes fairly quickly
s.lock.Lock() s.lock.Lock()
defer s.lock.Unlock() defer s.lock.Unlock()
@ -380,18 +392,6 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
} }
} }
if blobDigest == "" {
return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
}
if err := blobDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
if options.TOCDigest != "" {
if err := options.TOCDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
}
// Check if we have a wasn't-compressed layer in storage that's based on that blob. // Check if we have a wasn't-compressed layer in storage that's based on that blob.
// Check if we've already cached it in a file. // Check if we've already cached it in a file.
@ -1070,8 +1070,12 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
if err != nil { if err != nil {
return fmt.Errorf("digesting top-level manifest: %w", err) return fmt.Errorf("digesting top-level manifest: %w", err)
} }
key, err := manifestBigDataKey(manifestDigest)
if err != nil {
return err
}
options.BigData = append(options.BigData, storage.ImageBigDataOption{ options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: manifestBigDataKey(manifestDigest), Key: key,
Data: toplevelManifest, Data: toplevelManifest,
Digest: manifestDigest, Digest: manifestDigest,
}) })
@ -1079,8 +1083,12 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Set up to save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store. // Set up to save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
// and using storage.ImageDigestBigDataKey for future users that dont specify any digest and for compatibility with older readers. // and using storage.ImageDigestBigDataKey for future users that dont specify any digest and for compatibility with older readers.
key, err := manifestBigDataKey(s.manifestDigest)
if err != nil {
return err
}
options.BigData = append(options.BigData, storage.ImageBigDataOption{ options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: manifestBigDataKey(s.manifestDigest), Key: key,
Data: s.manifest, Data: s.manifest,
Digest: s.manifestDigest, Digest: s.manifestDigest,
}) })
@ -1098,8 +1106,12 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
}) })
} }
for instanceDigest, signatures := range s.signatureses { for instanceDigest, signatures := range s.signatureses {
key, err := signatureBigDataKey(instanceDigest)
if err != nil {
return err
}
options.BigData = append(options.BigData, storage.ImageBigDataOption{ options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: signatureBigDataKey(instanceDigest), Key: key,
Data: signatures, Data: signatures,
Digest: digest.Canonical.FromBytes(signatures), Digest: digest.Canonical.FromBytes(signatures),
}) })

View File

@ -21,14 +21,20 @@ var (
// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions. // manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably; // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey // for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
func manifestBigDataKey(digest digest.Digest) string { func manifestBigDataKey(digest digest.Digest) (string, error) {
return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String() if err := digest.Validate(); err != nil { // Make sure info.Digest.String() uses the expected format and does not collide with other BigData keys.
return "", err
}
return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String(), nil
} }
// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions. // signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably; // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
func signatureBigDataKey(digest digest.Digest) string { func signatureBigDataKey(digest digest.Digest) (string, error) {
return "signature-" + digest.Encoded() if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return "", err
}
return "signature-" + digest.Encoded(), nil
} }
// storageImageMetadata is stored, as JSON, in storage.Image.Metadata // storageImageMetadata is stored, as JSON, in storage.Image.Metadata

View File

@ -73,7 +73,10 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image,
// We don't need to care about storage.ImageDigestBigDataKey because // We don't need to care about storage.ImageDigestBigDataKey because
// manifests lists are only stored into storage by c/image versions // manifests lists are only stored into storage by c/image versions
// that know about manifestBigDataKey, and only using that key. // that know about manifestBigDataKey, and only using that key.
key := manifestBigDataKey(manifestDigest) key, err := manifestBigDataKey(manifestDigest)
if err != nil {
return false // This should never happen, manifestDigest comes from a reference.Digested, and that validates the format.
}
manifestBytes, err := store.ImageBigData(img.ID, key) manifestBytes, err := store.ImageBigData(img.ID, key)
if err != nil { if err != nil {
return false return false
@ -95,7 +98,10 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image,
if err != nil { if err != nil {
return false return false
} }
key = manifestBigDataKey(chosenInstance) key, err = manifestBigDataKey(chosenInstance)
if err != nil {
return false
}
_, err = store.ImageBigData(img.ID, key) _, err = store.ImageBigData(img.ID, key)
return err == nil // true if img.ID is based on chosenInstance. return err == nil // true if img.ID is based on chosenInstance.
} }

View File

@ -237,7 +237,10 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
// GetManifest() reads the image's manifest. // GetManifest() reads the image's manifest.
func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) { func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) {
if instanceDigest != nil { if instanceDigest != nil {
key := manifestBigDataKey(*instanceDigest) key, err := manifestBigDataKey(*instanceDigest)
if err != nil {
return nil, "", err
}
blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
if err != nil { if err != nil {
return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err) return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err)
@ -249,7 +252,10 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
// Prefer the manifest corresponding to the user-specified digest, if available. // Prefer the manifest corresponding to the user-specified digest, if available.
if s.imageRef.named != nil { if s.imageRef.named != nil {
if digested, ok := s.imageRef.named.(reference.Digested); ok { if digested, ok := s.imageRef.named.(reference.Digested); ok {
key := manifestBigDataKey(digested.Digest()) key, err := manifestBigDataKey(digested.Digest())
if err != nil {
return nil, "", err
}
blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
return nil, "", err return nil, "", err
@ -385,7 +391,14 @@ func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instan
instance := "default instance" instance := "default instance"
if instanceDigest != nil { if instanceDigest != nil {
signatureSizes = s.metadata.SignaturesSizes[*instanceDigest] signatureSizes = s.metadata.SignaturesSizes[*instanceDigest]
key = signatureBigDataKey(*instanceDigest) k, err := signatureBigDataKey(*instanceDigest)
if err != nil {
return nil, err
}
key = k
if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return nil, err
}
instance = instanceDigest.Encoded() instance = instanceDigest.Encoded()
} }
if len(signatureSizes) > 0 { if len(signatureSizes) > 0 {

View File

@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner // VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 30 VersionMinor = 30
// VersionPatch is for backwards-compatible bug fixes // VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0 VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string. // VersionDev indicates development branch. Releases will be empty string.
VersionDev = "" VersionDev = ""

4
vendor/modules.txt vendored
View File

@ -106,7 +106,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.4.0 # github.com/containernetworking/plugins v1.4.0
## explicit; go 1.20 ## explicit; go 1.20
github.com/containernetworking/plugins/pkg/ns github.com/containernetworking/plugins/pkg/ns
# github.com/containers/common v0.58.1 # github.com/containers/common v0.58.3
## explicit; go 1.20 ## explicit; go 1.20
github.com/containers/common/internal github.com/containers/common/internal
github.com/containers/common/internal/attributedstring github.com/containers/common/internal/attributedstring
@ -159,7 +159,7 @@ github.com/containers/common/pkg/umask
github.com/containers/common/pkg/util github.com/containers/common/pkg/util
github.com/containers/common/pkg/version github.com/containers/common/pkg/version
github.com/containers/common/version github.com/containers/common/version
# github.com/containers/image/v5 v5.30.0 # github.com/containers/image/v5 v5.30.1
## explicit; go 1.19 ## explicit; go 1.19
github.com/containers/image/v5/copy github.com/containers/image/v5/copy
github.com/containers/image/v5/directory github.com/containers/image/v5/directory