conformance tests: don't break on trailing zeroes in layer blobs

When analyzing a layer blob's contents, don't fail if the blob is padded
out with extra zeroes beyond the point where the tar reader reports the
end of the archive.
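
The idea, as a minimal standalone sketch (a hypothetical helper, not the
conformance test code itself): tee everything that gets read into the
digester, and once the tar reader signals io.EOF, drain whatever bytes
remain so trailing zero blocks are still hashed.

    // Hypothetical sketch: digest a layer stream without tripping over
    // zero padding that follows the end of the tar archive.
    package main

    import (
    	"archive/tar"
    	"fmt"
    	"io"
    	"os"

    	"github.com/opencontainers/go-digest"
    )

    func digestLayer(uncompressed io.Reader) (digest.Digest, error) {
    	digester := digest.Canonical.Digester()
    	// Tee everything we read into the digester, including whatever is
    	// left over after the tar reader decides the archive is finished.
    	teed := io.TeeReader(uncompressed, digester.Hash())
    	tr := tar.NewReader(teed)
    	for {
    		if _, err := tr.Next(); err == io.EOF {
    			break
    		} else if err != nil {
    			return "", err
    		}
    	}
    	// Some blobs are padded out with extra zero blocks; drain them too,
    	// or the computed digest won't cover the whole blob.
    	if _, err := io.Copy(io.Discard, teed); err != nil {
    		return "", err
    	}
    	return digester.Digest(), nil
    }

    func main() {
    	// Assumes an already-uncompressed tar stream on stdin.
    	d, err := digestLayer(os.Stdin)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	fmt.Println(d)
    }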

Add more detail to the diagnostic error we print when there's a digest
or length mismatch, too, in case it's triggered by something other than
zero padding.
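
Illustratively, the extra detail amounts to naming how the blob was
compressed and reporting both the expected and actual lengths; the helper
below is a hypothetical stand-in for the test's inline logic.

    // Hypothetical sketch of the richer mismatch diagnostic.
    package main

    import "fmt"

    func describeMismatch(wasCompressed bool, algorithmName string, expectedSize, actualSize int64) string {
    	blobFormat := "uncompressed"
    	if wasCompressed {
    		if algorithmName != "" {
    			blobFormat = "compressed with " + algorithmName
    		} else {
    			blobFormat = "compressed (?)"
    		}
    	}
    	return fmt.Sprintf("calculated digest of %s blob didn't match expected digest (expected length %d, actual length %d)", blobFormat, expectedSize, actualSize)
    }

    func main() {
    	fmt.Println(describeMismatch(true, "gzip", 1234, 1230))
    }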

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Nalin Dahyabhai 2024-05-02 14:48:41 -04:00
parent fe095c8bfa
commit a936abcba2
1 changed file with 19 additions and 4 deletions


@@ -31,6 +31,7 @@ import (
 	"github.com/containers/image/v5/transports"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/reexec"
 	docker "github.com/fsouza/go-dockerclient"
 	digest "github.com/opencontainers/go-digest"
@@ -847,11 +848,15 @@ func saveReport(ctx context.Context, t *testing.T, ref types.ImageReference, dir
 // summarizeLayer reads a blob and returns a summary of the parts of its contents that we care about
 func summarizeLayer(t *testing.T, imageName string, blobInfo types.BlobInfo, reader io.Reader) (layer Layer) {
 	compressedDigest := digest.Canonical.Digester()
-	uncompressedBlob, _, err := compression.AutoDecompress(io.TeeReader(reader, compressedDigest.Hash()))
-	require.Nil(t, err, "error decompressing blob %+v for image %q", blobInfo, imageName)
+	counter := ioutils.NewWriteCounter(compressedDigest.Hash())
+	compressionAlgorithm, _, reader, err := compression.DetectCompressionFormat(reader)
+	require.NoErrorf(t, err, "error checking if blob %+v for image %q is compressed", blobInfo, imageName)
+	uncompressedBlob, wasCompressed, err := compression.AutoDecompress(io.TeeReader(reader, counter))
+	require.NoErrorf(t, err, "error decompressing blob %+v for image %q", blobInfo, imageName)
 	defer uncompressedBlob.Close()
 	uncompressedDigest := digest.Canonical.Digester()
-	tr := tar.NewReader(io.TeeReader(uncompressedBlob, uncompressedDigest.Hash()))
+	tarToRead := io.TeeReader(uncompressedBlob, uncompressedDigest.Hash())
+	tr := tar.NewReader(tarToRead)
 	hdr, err := tr.Next()
 	for err == nil {
 		header := fsHeaderForEntry(hdr)
@@ -866,8 +871,18 @@ func summarizeLayer(t *testing.T, imageName string, blobInfo types.BlobInfo, rea
 		hdr, err = tr.Next()
 	}
 	require.Equal(t, io.EOF, err, "unexpected error reading layer contents %+v for image %q", blobInfo, imageName)
+	_, err = io.Copy(ioutil.Discard, tarToRead)
+	require.NoError(t, err, "reading out any not-usually-present zero padding at the end")
 	layer.CompressedDigest = compressedDigest.Digest()
-	require.Equal(t, blobInfo.Digest, layer.CompressedDigest, "calculated digest of compressed blob didn't match expected digest")
+	blobFormatDescription := "uncompressed"
+	if wasCompressed {
+		if compressionAlgorithm.Name() != "" {
+			blobFormatDescription = "compressed with " + compressionAlgorithm.Name()
+		} else {
+			blobFormatDescription = "compressed (?)"
+		}
+	}
+	require.Equalf(t, blobInfo.Digest, layer.CompressedDigest, "calculated digest of %s blob didn't match expected digest (expected length %d, actual length %d)", blobFormatDescription, blobInfo.Size, counter.Count)
 	layer.UncompressedDigest = uncompressedDigest.Digest()
 	return layer
 }