buildah/tests/blobcache.bats

#!/usr/bin/env bats
load helpers
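# These tests exercise buildah's --blob-cache option, which points buildah at a
# directory where layer blobs seen during pulls and builds are stashed so that
# later commits and pushes can reuse them instead of regenerating or
# re-fetching them.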
@test "blobcache-pull" {
blobcachedir=${TEST_SCRATCH_DIR}/cache
mkdir -p ${blobcachedir}
# Pull an image using a fresh directory for the blob cache.
run_buildah pull --blob-cache=${blobcachedir} $WITH_POLICY_JSON registry.k8s.io/pause
# Check that we dropped some files in there.
run find ${blobcachedir} -type f
echo "$output"
[ "$status" -eq 0 ]
[ "${#lines[@]}" -gt 0 ]
}
@test "blobcache-from" {
blobcachedir=${TEST_SCRATCH_DIR}/cache
mkdir -p ${blobcachedir}
# Pull an image using a fresh directory for the blob cache.
run_buildah from --blob-cache=${blobcachedir} $WITH_POLICY_JSON registry.k8s.io/pause
# Check that we dropped some files in there.
run find ${blobcachedir} -type f
echo "$output"
[ "$status" -eq 0 ]
[ "${#lines[*]}" -gt 0 ]
}
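# _check_matches destdir blobcachedir expected-matched matched-desc expected-unmatched unmatched-desc
# Compare every file written to destdir against the blobs in blobcachedir and
# assert how many of them did (and did not) come straight from the cache.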
function _check_matches() {
local destdir="$1"
local blobcachedir="$2"
# Look for layer blobs in the destination that match the ones in the cache.
local matched=0
local unmatched=0
for content in ${destdir}/* ; do
match=false
for blob in ${blobcachedir}/* ; do
if cmp -s ${content} ${blob} ; then
echo $(file ${blob}) and ${content} have the same contents, was cached
match=true
break
fi
done
if ${match} ; then
matched=$(( ${matched} + 1 ))
else
unmatched=$(( ${unmatched} + 1 ))
echo ${content} was not cached
fi
done
expect_output --from="$matched" "$3" "$4 should match"
expect_output --from="$unmatched" "$5" "$6 should not match"
}
# Integration test for https://github.com/containers/image/pull/1645
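# c/image's blob-info cache records the compressed digests it has seen for a
# blob regardless of which registry served it, so a push to a different
# registry can skip uploading blobs that registry already has.  Manual
# reproduction from the commit that added this test (quay.io/fl/test is just
# an example destination):
#   skopeo copy docker://registry.fedoraproject.org/fedora-minimal docker://quay.io/fl/test:some-tag
#   buildah pull registry.fedoraproject.org/fedora-minimal
#   buildah tag registry.fedoraproject.org/fedora-minimal quay.io/fl/test
#   buildah push quay.io/fl/test
# The final push reports "Copying blob ... skipped: already exists".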
@test "blobcache: blobs must be reused when pushing across registry" {
start_registry
imgname=blobimg-$(random_string | tr A-Z a-z)
run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
outputdir=${TEST_SCRATCH_DIR}/outputdir
mkdir -p ${outputdir}
podman run --rm --mount type=bind,src=${TEST_SCRATCH_DIR}/test.auth,target=/test.auth,Z --mount type=bind,src=${outputdir},target=/output,Z --net host quay.io/skopeo/stable copy --preserve-digests --authfile=/test.auth --tls-verify=false docker://registry.fedoraproject.org/fedora-minimal dir:/output
run_buildah rmi --all -f
run_buildah pull dir:${outputdir}
run_buildah images -a --format '{{.ID}}'
cid=$output
run_buildah --log-level debug push --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth $cid docker://localhost:${REGISTRY_PORT}/$imgname
# The registry does not have these blobs yet, so the push must actually upload them;
# the output must not contain "Skipping blob".
assert "$output" !~ "Skipping blob"
# Clear local images and c/image's blob-info cache
run_buildah rmi --all -f
cachedir=/var/lib
if is_rootless; then
cachedir=$HOME/.local/share
fi
cachefile=$cachedir/containers/cache/blob-info-cache-v1.sqlite
run rm $cachefile
assert "$status" -eq 0 "status of `run rm $cachefile` must be 0"
# This push must skip the blobs: after vendoring https://github.com/containers/image/pull/1645,
# the compressed digests recorded while pulling from the dir are matched against the registry,
# which already has the blobs from the first push.
run_buildah pull dir:${outputdir}
run_buildah images -a --format '{{.ID}}'
cid=$output
run_buildah --log-level debug push --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth $cid docker://localhost:${REGISTRY_PORT}/$imgname
expect_output --substring "Skipping blob"
}
@test "blobcache-commit" {
blobcachedir=${TEST_SCRATCH_DIR}/cache
mkdir -p ${blobcachedir}
# Pull an image using a fresh directory for the blob cache.
run_buildah from --quiet --blob-cache=${blobcachedir} $WITH_POLICY_JSON registry.k8s.io/pause
ctr="$output"
run_buildah add ${ctr} $BUDFILES/add-file/file /
# Commit the image without using the blob cache, using compression so that uncompressed blobs
# in the cache which we inherited from our base image won't be matched.
doomeddir=${TEST_SCRATCH_DIR}/doomed
mkdir -p ${doomeddir}
run_buildah commit $WITH_POLICY_JSON --disable-compression=false ${ctr} dir:${doomeddir}
_check_matches $doomeddir $blobcachedir \
0 "nothing" \
6 "everything"
# Commit the image using the blob cache, again using compression. We'll have recorded the
# compressed digests that match the uncompressed digests the last time around, so we should
# get some matches this time.
destdir=${TEST_SCRATCH_DIR}/dest
mkdir -p ${destdir}
ls -l ${blobcachedir}
run_buildah commit $WITH_POLICY_JSON --blob-cache=${blobcachedir} --disable-compression=false ${ctr} dir:${destdir}
_check_matches $destdir $blobcachedir \
5 "base layers, new layer, config, and manifest" \
1 "version"
}
@test "blobcache-push" {
target=targetimage
blobcachedir=${TEST_SCRATCH_DIR}/cache
mkdir -p ${blobcachedir}
# Pull an image using a fresh directory for the blob cache.
run_buildah from --quiet --blob-cache=${blobcachedir} $WITH_POLICY_JSON registry.k8s.io/pause
ctr="$output"
run_buildah add ${ctr} $BUDFILES/add-file/file /
# Commit the image using the blob cache.
ls -l ${blobcachedir}
run_buildah commit $WITH_POLICY_JSON --blob-cache=${blobcachedir} --disable-compression=false ${ctr} ${target}
# Try to push the image without the blob cache.
doomeddir=${TEST_SCRATCH_DIR}/doomed
mkdir -p ${doomeddir}
ls -l ${blobcachedir}
run_buildah push $WITH_POLICY_JSON ${target} dir:${doomeddir}
_check_matches $doomeddir $blobcachedir \
2 "only config and new layer" \
4 "version, manifest, base layers"
# Now try to push the image using the blob cache.
destdir=${TEST_SCRATCH_DIR}/dest
mkdir -p ${destdir}
ls -l ${blobcachedir}
run_buildah push $WITH_POLICY_JSON --blob-cache=${blobcachedir} ${target} dir:${destdir}
_check_matches $destdir $blobcachedir \
5 "base image layers, new layer, config, and manifest" \
1 "version"
}
@test "blobcache-build-compressed-using-dockerfile-explicit-push" {
blobcachedir=${TEST_SCRATCH_DIR}/cache
mkdir -p ${blobcachedir}
target=new-image
# Build an image while pulling the base image. Compress the layers so that they get added
# to the blob cache in their compressed forms.
run_buildah build-using-dockerfile -t ${target} --pull-always $WITH_POLICY_JSON --blob-cache=${blobcachedir} --disable-compression=false $BUDFILES/add-file
# Now try to push the image using the blob cache. The blob cache will only suggest the
# compressed version of a blob if it's been told that we want to compress things, so
# we also request compression here to avoid having the copy logic just compress the
# uncompressed copy again.
destdir=${TEST_SCRATCH_DIR}/dest
mkdir -p ${destdir}
run_buildah push $WITH_POLICY_JSON --blob-cache=${blobcachedir} --disable-compression=false ${target} dir:${destdir}
_check_matches $destdir $blobcachedir \
4 "config, base layer, new layer, and manifest" \
1 "version"
}
@test "blobcache-build-uncompressed-using-dockerfile-explicit-push" {
blobcachedir=${TEST_SCRATCH_DIR}/cache
mkdir -p ${blobcachedir}
target=new-image
# Build an image while pulling the base image.
run_buildah build-using-dockerfile -t ${target} -D --pull-always --blob-cache=${blobcachedir} $WITH_POLICY_JSON $BUDFILES/add-file
# Now try to push the image using the blob cache.
destdir=${TEST_SCRATCH_DIR}/dest
mkdir -p ${destdir}
run_buildah push $WITH_POLICY_JSON --blob-cache=${blobcachedir} ${target} dir:${destdir}
_check_matches $destdir $blobcachedir \
2 "config and previously-compressed base layer" \
3 "version, new layer, and manifest"
}
@test "blobcache-build-compressed-using-dockerfile-implicit-push" {
blobcachedir=${TEST_SCRATCH_DIR}/cache
mkdir -p ${blobcachedir}
target=new-image
destdir=${TEST_SCRATCH_DIR}/dest
mkdir -p ${destdir}
# Build an image while pulling the base image, implicitly pushing while writing.
run_buildah build-using-dockerfile -t dir:${destdir} --pull-always --blob-cache=${blobcachedir} $WITH_POLICY_JSON $BUDFILES/add-file
_check_matches $destdir $blobcachedir \
4 "base image, layer, config, and manifest" \
1 "version"
}
@test "blobcache-build-uncompressed-using-dockerfile-implicit-push" {
blobcachedir=${TEST_SCRATCH_DIR}/cache
mkdir -p ${blobcachedir}
target=new-image
destdir=${TEST_SCRATCH_DIR}/dest
mkdir -p ${destdir}
# Build an image while pulling the base image, implicitly pushing while writing.
run_buildah build-using-dockerfile -t dir:${destdir} -D --pull-always --blob-cache=${blobcachedir} $WITH_POLICY_JSON $BUDFILES/add-file
_check_matches $destdir $blobcachedir \
4 "base image, our layer, config, and manifest" \
1 "version"
}