build(deps): bump github.com/containers/image/v5 from 5.13.2 to 5.14.0

Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.13.2 to 5.14.0.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.13.2...v5.14.0)

---
updated-dependencies:
- dependency-name: github.com/containers/image/v5
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
parent 4535bc5342
commit 914a7e5299

go.mod (4 changed lines)

```diff
@@ -5,7 +5,7 @@ go 1.12
 require (
 	github.com/containernetworking/cni v0.8.1
 	github.com/containers/common v0.41.1-0.20210721112610-c95d2f794edf
-	github.com/containers/image/v5 v5.13.2
+	github.com/containers/image/v5 v5.14.0
 	github.com/containers/ocicrypt v1.1.2
 	github.com/containers/storage v1.33.0
 	github.com/docker/distribution v2.7.1+incompatible
@@ -36,7 +36,7 @@ require (
 	go.etcd.io/bbolt v1.3.6
 	golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20210603125802-9665404d3644
+	golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
 	k8s.io/klog v1.0.0 // indirect
 )
```
go.sum (9 changed lines)

```diff
@@ -227,8 +227,9 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
 github.com/containers/common v0.41.1-0.20210721112610-c95d2f794edf h1:z0ciG0ByyJG3WCBpLYd2XLThCC7UBaH7GeSfXY4sAqc=
 github.com/containers/common v0.41.1-0.20210721112610-c95d2f794edf/go.mod h1:Ba5YVNCnyX6xDtg1JqEHa2EMVMW5UbHmIyEqsEwpeGE=
-github.com/containers/image/v5 v5.13.2 h1:AgYunV/9d2fRkrmo23wH2MkqeHolFd6oQCkK+1PpuFA=
 github.com/containers/image/v5 v5.13.2/go.mod h1:GkWursKDlDcUIT7L7vZf70tADvZCk/Ga0wgS0MuF0ag=
+github.com/containers/image/v5 v5.14.0 h1:ORaFZ/NwFjkSunMhxg9I8fQod8pgXkrYNiZzit/VqOE=
+github.com/containers/image/v5 v5.14.0/go.mod h1:SxiBKOcKuT+4yTjD0AskjO+UwFvNcVOJ9qlAw1HNSPU=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -781,8 +782,9 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v7 v7.0.2 h1:eN6AD/ytv1nqCO7Dm8MO0/pGMKmMyH/WMnTJhAUuc/w=
 github.com/vbauerster/mpb/v7 v7.0.2/go.mod h1:Mnq3gESXJ9eQhccbGZDggJ1faTCrmaA4iN57fUloRGE=
+github.com/vbauerster/mpb/v7 v7.0.3 h1:NfX0pHWhlDTev15M/C3qmSTM1EiIjcS+/d6qS6H4FnI=
+github.com/vbauerster/mpb/v7 v7.0.3/go.mod h1:NXGsfPGx6G2JssqvEcULtDqUrxuuYs4llpv8W6ZUpzk=
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=
@@ -1045,8 +1047,9 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
 golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201113234701-d7a72108b828/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
```
tests/from.bats

```diff
@@ -404,11 +404,11 @@ load helpers
 
   # Try encrypted image without key should fail
   run_buildah 125 from oci:${TESTDIR}/tmp/busybox_enc
-  expect_output --substring "Error decrypting layer .* missing private key needed for decryption"
+  expect_output --substring "decrypting layer .* missing private key needed for decryption"
 
   # Try encrypted image with wrong key should fail
   run_buildah 125 from --decryption-key ${TESTDIR}/tmp/mykey2.pem oci:${TESTDIR}/tmp/busybox_enc
-  expect_output --substring "Error decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+  expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
 
   # Providing the right key should succeed
   run_buildah from --decryption-key ${TESTDIR}/tmp/mykey.pem oci:${TESTDIR}/tmp/busybox_enc
@@ -426,11 +426,11 @@ load helpers
 
   # Try encrypted image without key should fail
   run_buildah 125 from --tls-verify=false --creds testuser:testpassword docker://localhost:5000/buildah/busybox_encrypted:latest
-  expect_output --substring "Error decrypting layer .* missing private key needed for decryption"
+  expect_output --substring "decrypting layer .* missing private key needed for decryption"
 
   # Try encrypted image with wrong key should fail
   run_buildah 125 from --tls-verify=false --creds testuser:testpassword --decryption-key ${TESTDIR}/tmp/mykey2.pem docker://localhost:5000/buildah/busybox_encrypted:latest
-  expect_output --substring "Error decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+  expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
 
   # Providing the right key should succeed
   run_buildah from --tls-verify=false --creds testuser:testpassword --decryption-key ${TESTDIR}/tmp/mykey.pem docker://localhost:5000/buildah/busybox_encrypted:latest
```
tests/pull.bats

```diff
@@ -13,7 +13,7 @@ load helpers
   # force a failed pull and look at the error message which *must* include the
   # the resolved image name (localhost/image:latest).
   run_buildah 125 pull --policy=always image
-  [[ "$output" == *"Error initializing source docker://localhost/image:latest"* ]]
+  [[ "$output" == *"initializing source docker://localhost/image:latest"* ]]
   run_buildah rmi localhost/image ${iid}
 }
 
@@ -186,11 +186,11 @@ load helpers
 
   # Try to pull encrypted image without key should fail
   run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json oci:${TESTDIR}/tmp/busybox_enc
-  expect_output --substring "Error decrypting layer .* missing private key needed for decryption"
+  expect_output --substring "decrypting layer .* missing private key needed for decryption"
 
   # Try to pull encrypted image with wrong key should fail
   run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --decryption-key ${TESTDIR}/tmp/mykey2.pem oci:${TESTDIR}/tmp/busybox_enc
-  expect_output --substring "Error decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+  expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
 
   # Providing the right key should succeed
   run_buildah pull --signature-policy ${TESTSDIR}/policy.json --decryption-key ${TESTDIR}/tmp/mykey.pem oci:${TESTDIR}/tmp/busybox_enc
@@ -208,11 +208,11 @@ load helpers
 
   # Try to pull encrypted image without key should fail
   run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword docker://localhost:5000/buildah/busybox_encrypted:latest
-  expect_output --substring "Error decrypting layer .* missing private key needed for decryption"
+  expect_output --substring "decrypting layer .* missing private key needed for decryption"
 
   # Try to pull encrypted image with wrong key should fail, with diff. msg
   run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword --decryption-key ${TESTDIR}/tmp/mykey2.pem docker://localhost:5000/buildah/busybox_encrypted:latest
-  expect_output --substring "Error decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+  expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
 
   # Providing the right key should succeed
   run_buildah pull --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword --decryption-key ${TESTDIR}/tmp/mykey.pem docker://localhost:5000/buildah/busybox_encrypted:latest
@@ -234,11 +234,11 @@ load helpers
 
   # Try to pull encrypted image without key should fail
   run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword docker://localhost:5000/buildah/busybox_encrypted:latest
-  expect_output --substring "Error decrypting layer .* missing private key needed for decryption"
+  expect_output --substring "decrypting layer .* missing private key needed for decryption"
 
   # Try to pull encrypted image with wrong key should fail
   run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword --decryption-key ${TESTDIR}/tmp/mykey2.pem docker://localhost:5000/buildah/busybox_encrypted:latest
-  expect_output --substring "Error decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+  expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
 
   # Providing the right key should succeed
   run_buildah pull --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword --decryption-key ${TESTDIR}/tmp/mykey.pem docker://localhost:5000/buildah/busybox_encrypted:latest
```
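The test changes above all track one behavior change: containers/image v5.14.0 stopped prefixing its wrapped error messages with "Error ", so the substrings the bats tests expect lose that prefix. A minimal sketch of the wrapping behavior, using github.com/pkg/errors as the diffs below do (the digest value is a placeholder, not from the commit):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	cause := errors.New("missing private key needed for decryption")

	// v5.13.2-style wrap message, with the "Error " prefix:
	before := errors.Wrap(cause, `Error decrypting layer "sha256:0000"`)
	// v5.14.0-style wrap message, prefix dropped:
	after := errors.Wrap(cause, `decrypting layer "sha256:0000"`)

	fmt.Println(before) // Error decrypting layer "sha256:0000": missing private key needed for decryption
	fmt.Println(after)  // decrypting layer "sha256:0000": missing private key needed for decryption
}
```

With the prefix removed, whoever finally prints the error can add "Error: " exactly once, instead of each wrap layer contributing its own.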
vendor/github.com/containers/image/v5/copy/copy.go

```diff
@@ -92,7 +92,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
 			// Coverage: This should not happen, the hash.Hash interface requires
 			// d.digest.Write to never return an error, and the io.Writer interface
 			// requires n2 == len(input) if no error is returned.
-			return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
+			return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
 		}
 	}
 	if err == io.EOF {
@@ -123,6 +123,7 @@ type copier struct {
 	ociEncryptConfig      *encconfig.EncryptConfig
 	maxParallelDownloads  uint
 	downloadForeignLayers bool
+	fetchPartialBlobs     bool
 }
 
 // imageCopier tracks state specific to a single image (possibly an item of a manifest list)
```
```diff
@@ -194,15 +195,21 @@ type Options struct {
 	// OciDecryptConfig contains the config that can be used to decrypt an image if it is
 	// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
 	OciDecryptConfig *encconfig.DecryptConfig
 
 	// MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0.
 	MaxParallelDownloads uint
 
 	// When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already
 	// exists (and is equivalent). Making the eventual (no-op) copy more performant for this case. Enabling the option
 	// is slightly pessimistic if the destination image doesn't exist, or is not equivalent.
 	OptimizeDestinationImageAlreadyExists bool
 
 	// Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
 	// to not indicate "nondistributable".
 	DownloadForeignLayers bool
+
+	// FetchPartialBlobs indicates whether to attempt to fetch the blob partially. Experimental.
+	FetchPartialBlobs bool
 }
 
 // validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
```
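Options.FetchPartialBlobs is the one public API addition in this bump. A hedged sketch of how a caller might opt in when invoking copy.Image; the image references and the permissive policy below are illustrative assumptions, not part of this commit:

```go
package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/oci/layout"
	"github.com/containers/image/v5/signature"
)

func pullWithPartialBlobs(ctx context.Context) error {
	// Source and destination are placeholders chosen for the sketch.
	srcRef, err := docker.ParseReference("//quay.io/libpod/busybox:latest")
	if err != nil {
		return err
	}
	destRef, err := layout.ParseReference("/tmp/busybox-oci:latest")
	if err != nil {
		return err
	}

	// Accept-anything policy, fine for a local experiment only.
	policy := &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	}
	policyCtx, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	defer policyCtx.Destroy()

	_, err = copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
		FetchPartialBlobs: true, // new in v5.14.0; experimental opt-in
	})
	return err
}
```

The option only changes behavior when both the source and the destination support partial blobs; otherwise the copier takes the usual full-blob path (see the copyLayer hunk further down).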
```diff
@@ -240,7 +247,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 
 	dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
+		return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef))
 	}
 	defer func() {
 		if err := dest.Close(); err != nil {
@@ -250,7 +257,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 
 	rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
+		return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef))
 	}
 	defer func() {
 		if err := rawSource.Close(); err != nil {
@@ -283,6 +290,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		ociEncryptConfig:      options.OciEncryptConfig,
 		maxParallelDownloads:  options.MaxParallelDownloads,
 		downloadForeignLayers: options.DownloadForeignLayers,
+		fetchPartialBlobs:     options.FetchPartialBlobs,
 	}
 	// Default to using gzip compression unless specified otherwise.
 	if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
@@ -302,7 +310,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 	unparsedToplevel := image.UnparsedInstance(rawSource, nil)
 	multiImage, err := isMultiImage(ctx, unparsedToplevel)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
+		return nil, errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(srcRef))
 	}
 
 	if !multiImage {
@@ -315,15 +323,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		// matches the current system to copy, and copy it.
 		mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
 		if err != nil {
-			return nil, errors.Wrapf(err, "Error reading manifest for %s", transports.ImageName(srcRef))
+			return nil, errors.Wrapf(err, "reading manifest for %s", transports.ImageName(srcRef))
 		}
 		manifestList, err := manifest.ListFromBlob(mfest, manifestType)
 		if err != nil {
-			return nil, errors.Wrapf(err, "Error parsing primary manifest as list for %s", transports.ImageName(srcRef))
+			return nil, errors.Wrapf(err, "parsing primary manifest as list for %s", transports.ImageName(srcRef))
 		}
 		instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx
 		if err != nil {
-			return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
+			return nil, errors.Wrapf(err, "choosing an image from manifest list %s", transports.ImageName(srcRef))
 		}
 		logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
 		unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
@@ -334,7 +342,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 	} else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
 		// If we were asked to copy multiple images and can't, that's an error.
 		if !supportsMultipleImages(c.dest) {
-			return nil, errors.Errorf("Error copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
+			return nil, errors.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
 		}
 		// Copy some or all of the images.
 		switch options.ImageListSelection {
@@ -343,13 +351,13 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		case CopySpecificImages:
 			logrus.Debugf("Source is a manifest list; copying some instances")
 		}
-		if copiedManifest, _, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
+		if copiedManifest, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
 			return nil, err
 		}
 	}
 
 	if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
-		return nil, errors.Wrap(err, "Error committing the finished image")
+		return nil, errors.Wrap(err, "committing the finished image")
 	}
 
 	return copiedManifest, nil
@@ -376,12 +384,12 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
 func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
 	srcManifest, _, err := src.Manifest(ctx)
 	if err != nil {
-		return false, nil, "", "", errors.Wrapf(err, "Error reading manifest from image")
+		return false, nil, "", "", errors.Wrapf(err, "reading manifest from image")
 	}
 
 	srcManifestDigest, err := manifest.Digest(srcManifest)
 	if err != nil {
-		return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest")
+		return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
 	}
 
 	destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
@@ -398,7 +406,7 @@ func compareImageDestinationManifestEqual(ctx context.Context, options *Options,
 
 	destManifestDigest, err := manifest.Digest(destManifest)
 	if err != nil {
-		return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest")
+		return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
 	}
 
 	logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
```
```diff
@@ -412,15 +420,15 @@ func compareImageDestinationManifestEqual(ctx context.Context, options *Options,
 
 // copyMultipleImages copies some or all of an image list's instances, using
 // policyContext to validate source image admissibility.
-func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, copiedManifestType string, retErr error) {
+func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
 	// Parse the list and get a copy of the original value after it's re-encoded.
 	manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
 	if err != nil {
-		return nil, "", errors.Wrapf(err, "Error reading manifest list")
+		return nil, errors.Wrapf(err, "reading manifest list")
 	}
 	originalList, err := manifest.ListFromBlob(manifestList, manifestType)
 	if err != nil {
-		return nil, "", errors.Wrapf(err, "Error parsing manifest list %q", string(manifestList))
+		return nil, errors.Wrapf(err, "parsing manifest list %q", string(manifestList))
 	}
 	updatedList := originalList.Clone()
 
```
```diff
@@ -432,14 +440,14 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 		c.Printf("Getting image list signatures\n")
 		s, err := c.rawSource.GetSignatures(ctx, nil)
 		if err != nil {
-			return nil, "", errors.Wrap(err, "Error reading signatures")
+			return nil, errors.Wrap(err, "reading signatures")
 		}
 		sigs = s
 	}
 	if len(sigs) != 0 {
 		c.Printf("Checking if image list destination supports signatures\n")
 		if err := c.dest.SupportsSignatures(ctx); err != nil {
-			return nil, "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
+			return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
 		}
 	}
 	canModifyManifestList := (len(sigs) == 0)
@@ -454,11 +462,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 	}
 	selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
 	if err != nil {
-		return nil, "", errors.Wrapf(err, "Error determining manifest list type to write to destination")
+		return nil, errors.Wrapf(err, "determining manifest list type to write to destination")
 	}
 	if selectedListType != originalList.MIMEType() {
 		if !canModifyManifestList {
-			return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType)
+			return nil, errors.Errorf("manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType)
 		}
 	}
 
@@ -483,7 +491,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 		if skip {
 			update, err := updatedList.Instance(instanceDigest)
 			if err != nil {
-				return nil, "", err
+				return nil, err
 			}
 			logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
 			// Record the digest/size/type of the manifest that we didn't copy.
@@ -496,7 +504,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 		unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
 		updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
 		if err != nil {
-			return nil, "", err
+			return nil, err
 		}
 		instancesCopied++
 		// Record the result of a possible conversion here.
@@ -510,7 +518,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 
 	// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
 	if err = updatedList.UpdateInstances(updates); err != nil {
-		return nil, "", errors.Wrapf(err, "Error updating manifest list")
+		return nil, errors.Wrapf(err, "updating manifest list")
 	}
 
 	// Iterate through supported list types, preferred format first.
@@ -525,7 +533,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 		if thisListType != updatedList.MIMEType() {
 			attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
 			if err != nil {
-				return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", thisListType)
+				return nil, errors.Wrapf(err, "converting manifest list to list with MIME type %q", thisListType)
 			}
 		}
 
@@ -533,17 +541,17 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 		// by serializing them both so that we can compare them.
 		attemptedManifestList, err := attemptedList.Serialize()
 		if err != nil {
-			return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances())
+			return nil, errors.Wrapf(err, "encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances())
 		}
 		originalManifestList, err := originalList.Serialize()
 		if err != nil {
-			return nil, "", errors.Wrapf(err, "Error encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
+			return nil, errors.Wrapf(err, "encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
 		}
 
 		// If we can't just use the original value, but we have to change it, flag an error.
 		if !bytes.Equal(attemptedManifestList, originalManifestList) {
 			if !canModifyManifestList {
-				return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType)
+				return nil, errors.Errorf(" manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType)
 			}
 			logrus.Debugf("Manifest list has been updated")
 		} else {
@@ -563,24 +571,24 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 			break
 		}
 	}
 	if errs != nil {
-		return nil, "", fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
+		return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
 	}
 
 	// Sign the manifest list.
 	if options.SignBy != "" {
 		newSig, err := c.createSignature(manifestList, options.SignBy)
 		if err != nil {
-			return nil, "", err
+			return nil, err
 		}
 		sigs = append(sigs, newSig)
 	}
 
 	c.Printf("Storing list signatures\n")
 	if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil {
-		return nil, "", errors.Wrap(err, "Error writing signatures")
+		return nil, errors.Wrap(err, "writing signatures")
 	}
 
-	return manifestList, selectedListType, nil
+	return manifestList, nil
 }
 
 // copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
```
```diff
@@ -591,7 +599,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 	multiImage, err := isMultiImage(ctx, unparsedImage)
 	if err != nil {
 		// FIXME FIXME: How to name a reference for the sub-image?
-		return nil, "", "", errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
+		return nil, "", "", errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
 	}
 	if multiImage {
 		return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
@@ -605,7 +613,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 	}
 	src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
 	if err != nil {
-		return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
+		return nil, "", "", errors.Wrapf(err, "initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
 	}
 
 	// If the destination is a digested reference, make a note of that, determine what digest value we're
@@ -617,20 +625,20 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 		destIsDigestedReference = true
 		sourceManifest, _, err := src.Manifest(ctx)
 		if err != nil {
-			return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image")
+			return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
 		}
 		matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
 		if err != nil {
-			return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest")
+			return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
 		}
 		if !matches {
 			manifestList, _, err := unparsedToplevel.Manifest(ctx)
 			if err != nil {
-				return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image")
+				return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
 			}
 			matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
 			if err != nil {
-				return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest")
+				return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
 			}
 			if !matches {
 				return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
@@ -650,7 +658,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 		c.Printf("Getting image source signatures\n")
 		s, err := src.Signatures(ctx)
 		if err != nil {
-			return nil, "", "", errors.Wrap(err, "Error reading signatures")
+			return nil, "", "", errors.Wrap(err, "reading signatures")
 		}
 		sigs = s
 	}
@@ -785,7 +793,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 
 	c.Printf("Storing signatures\n")
 	if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil {
-		return nil, "", "", errors.Wrap(err, "Error writing signatures")
+		return nil, "", "", errors.Wrap(err, "writing signatures")
 	}
 
 	return manifestBytes, retManifestType, retManifestDigest, nil
@@ -805,11 +813,11 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 	if dest.MustMatchRuntimeOS() {
 		c, err := src.OCIConfig(ctx)
 		if err != nil {
-			return errors.Wrapf(err, "Error parsing image configuration")
+			return errors.Wrapf(err, "parsing image configuration")
 		}
 		wantedPlatforms, err := platform.WantedPlatforms(sys)
 		if err != nil {
-			return errors.Wrapf(err, "error getting current platform information %#v", sys)
+			return errors.Wrapf(err, "getting current platform information %#v", sys)
 		}
 
 		options := newOrderedSet()
@@ -1034,13 +1042,13 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
 		}
 		pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
 		if err != nil {
-			return nil, "", errors.Wrap(err, "Error creating an updated image manifest")
+			return nil, "", errors.Wrap(err, "creating an updated image manifest")
 		}
 		pendingImage = pi
 	}
 	man, _, err := pendingImage.Manifest(ctx)
 	if err != nil {
-		return nil, "", errors.Wrap(err, "Error reading manifest")
+		return nil, "", errors.Wrap(err, "reading manifest")
 	}
 
 	if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
@@ -1056,7 +1064,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
 		instanceDigest = &manifestDigest
 	}
 	if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
-		return nil, "", errors.Wrapf(err, "Error writing manifest %q", string(man))
+		return nil, "", errors.Wrapf(err, "writing manifest %q", string(man))
 	}
 	return man, manifestDigest, nil
 }
```
```diff
@@ -1072,9 +1080,25 @@ func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
 	}
 }
 
+// customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar
+func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorator {
+	producer := func(filler interface{}) decor.DecorFunc {
+		return func(s decor.Statistics) string {
+			if s.Total == 0 {
+				pairFmt := "%.1f / %.1f (skipped: %.1f)"
+				return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
+			}
+			pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
+			percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+			return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
+		}
+	}
+	return decor.Any(producer(filler), wcc...)
+}
+
 // createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
 // is ioutil.Discard, the progress bar's output will be discarded
-func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
 	// shortDigestLen is the length of the digest used for blobs.
 	const shortDigestLen = 12
 
```
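customPartialBlobCounter builds on mpb/v7's decor.Any hook, which turns any func(decor.Statistics) string into a bar decorator; Statistics.Refill carries the byte count a bar marks as already satisfied (here, the portion of a blob the destination did not have to fetch). A small self-contained sketch of that hook, with illustrative names and values that are not from the commit:

```go
package main

import (
	"fmt"
	"time"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(100,
		mpb.AppendDecorators(
			// decor.Any adapts a plain function into a decorator; Refill is
			// whatever the bar owner reported via bar.SetRefill.
			decor.Any(func(s decor.Statistics) string {
				return fmt.Sprintf("%d/%d (refill %d)", s.Current, s.Total, s.Refill)
			}),
		),
	)
	for i := 0; i < 10; i++ {
		bar.IncrBy(10)
		time.Sleep(20 * time.Millisecond)
	}
	p.Wait()
}
```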
```diff
@@ -1091,18 +1115,30 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
 	// Use a normal progress bar when we know the size (i.e., size > 0).
 	// Otherwise, use a spinner to indicate that something's happening.
 	var bar *mpb.Bar
+	sstyle := mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft()
 	if info.Size > 0 {
-		bar = pool.AddBar(info.Size,
-			mpb.BarFillerClearOnComplete(),
-			mpb.PrependDecorators(
-				decor.OnComplete(decor.Name(prefix), onComplete),
-			),
-			mpb.AppendDecorators(
-				decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
-			),
-		)
+		if partial {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					customPartialBlobCounter(sstyle.Build()),
+				),
+			)
+		} else {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+				),
+			)
+		}
 	} else {
-		sstyle := mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft()
 		bar = pool.Add(0,
 			sstyle.Build(),
 			mpb.BarFillerClearOnComplete(),
```
```diff
@@ -1123,13 +1159,13 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
 	if srcInfo.Digest != "" {
 		configBlob, err := src.ConfigBlob(ctx)
 		if err != nil {
-			return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
+			return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
 		}
 
 		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
 			progressPool, progressCleanup := c.newProgressPool(ctx)
 			defer progressCleanup()
-			bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
+			bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
 			destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
 			if err != nil {
 				return types.BlobInfo{}, err
@@ -1213,11 +1249,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 	}
 
 	if err != nil {
-		return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
+		return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest)
 	}
 	if reused {
 		logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
-		bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
+		bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
 		bar.SetTotal(0, true)
 
 		// Throw an event that the layer has been skipped
```
@@ -1244,14 +1280,57 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 		}
 	}
 
+	// A partial pull is managed by the destination storage, that decides what portions
+	// of the source file are not known yet and must be fetched.
+	// Attempt a partial only when the source allows to retrieve a blob partially and
+	// the destination has support for it.
+	imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
+	imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
+	if ic.c.fetchPartialBlobs && okSource && okDest && !diffIDIsNeeded {
+		bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+
+		progress := make(chan int64)
+		terminate := make(chan interface{})
+
+		defer close(terminate)
+		defer close(progress)
+
+		proxy := imageSourceSeekableProxy{
+			source:   imgSource,
+			progress: progress,
+		}
+		go func() {
+			for {
+				select {
+				case written := <-progress:
+					bar.IncrInt64(written)
+				case <-terminate:
+					return
+				}
+			}
+		}()
+
+		bar.SetTotal(srcInfo.Size, false)
+		info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache)
+		if err == nil {
+			bar.SetRefill(srcInfo.Size - bar.Current())
+			bar.SetCurrent(srcInfo.Size)
+			bar.SetTotal(srcInfo.Size, true)
+			logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
+			return info, cachedDiffID, nil
+		}
+		bar.Abort(true)
+		logrus.Errorf("Failed to retrieve partial blob: %v", err)
+	}
+
 	// Fallback: copy the layer, computing the diffID if we need to do so
 	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
 	if err != nil {
-		return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
+		return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
 	}
 	defer srcStream.Close()
 
-	bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
+	bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
 
 	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
 	if err != nil {
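The partial-pull gate in the hunk above hinges on two type assertions: the raw source must implement ImageSourceSeekable and the destination must implement ImageDestinationPartial, otherwise execution falls through to the full-copy path. Below is a minimal, self-contained sketch of that optional-interface pattern; the interface and type names are hypothetical stand-ins for the internal types, not the library's API.

package main

import "fmt"

// blobGetter is the baseline capability every source has.
type blobGetter interface {
	GetBlob() string
}

// seekableGetter is an optional capability a source may also implement
// (standing in for internalTypes.ImageSourceSeekable).
type seekableGetter interface {
	GetBlobAt(offset, length int) string
}

type plainSource struct{}

func (plainSource) GetBlob() string { return "whole blob" }

// fetch prefers the partial path only when the source actually implements
// the seekable capability, and falls back to a full copy otherwise,
// the same shape as the copyLayer change above.
func fetch(src blobGetter) string {
	if s, ok := src.(seekableGetter); ok {
		return s.GetBlobAt(0, 10)
	}
	return src.GetBlob()
}

func main() { fmt.Println(fetch(plainSource{})) }

Note that the real code keeps partial pulls strictly opportunistic: any error in the partial path aborts the bar and falls back to GetBlob, so correctness never depends on the optimization.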
@@ -1265,7 +1344,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 		return types.BlobInfo{}, "", ctx.Err()
 	case diffIDResult := <-diffIDChan:
 		if diffIDResult.err != nil {
-			return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
+			return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
 		}
 		logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
 		// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
@@ -1288,7 +1367,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
 	var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
 	var diffIDChan chan diffIDResult
 
-	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
+	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithbelow
 	if diffIDIsNeeded {
 		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
 		pipeReader, pipeWriter := io.Pipe()
@@ -1350,7 +1429,7 @@ type errorAnnotationReader struct {
 func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
 	n, err = r.reader.Read(b)
 	if err != io.EOF {
-		return n, errors.Wrapf(err, "error happened during read")
+		return n, errors.Wrapf(err, "happened during read")
 	}
 	return n, err
 }
@@ -1377,7 +1456,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	// read stream to the end, and validation does not happen.
 	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
 	if err != nil {
-		return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
+		return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
 	}
 	var destStream io.Reader = digestingReader
 
@@ -1391,7 +1470,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		var d digest.Digest
 		destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false)
 		if err != nil {
-			return types.BlobInfo{}, errors.Wrapf(err, "Error decrypting layer %s", srcInfo.Digest)
+			return types.BlobInfo{}, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
 		}
 
 		srcInfo.Digest = d
@@ -1408,7 +1487,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
 	compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform
 	if err != nil {
-		return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
+		return types.BlobInfo{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
 	}
 	isCompressed := decompressor != nil
 	if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() {
@@ -1425,6 +1504,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		originalLayerReader = destStream
 	}
 
+	compressionMetadata := map[string]string{}
 	// === Deal with layer compression/decompression if necessary
 	// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
 	// short-circuit conditions
@@ -1453,7 +1533,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
 		// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
 		// we don’t care.
-		go c.compressGoroutine(pipeWriter, destStream, *uploadCompressionFormat) // Closes pipeWriter
+		go c.compressGoroutine(pipeWriter, destStream, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
 		destStream = pipeReader
 		inputInfo.Digest = ""
 		inputInfo.Size = -1
@@ -1473,7 +1553,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		pipeReader, pipeWriter := io.Pipe()
 		defer pipeReader.Close()
 
-		go c.compressGoroutine(pipeWriter, s, *uploadCompressionFormat) // Closes pipeWriter
+		go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
 
 		destStream = pipeReader
 		inputInfo.Digest = ""
@@ -1533,7 +1613,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 
 		s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc)
 		if err != nil {
-			return types.BlobInfo{}, errors.Wrapf(err, "Error encrypting blob %s", srcInfo.Digest)
+			return types.BlobInfo{}, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
 		}
 
 		destStream = s
@@ -1576,7 +1656,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
 	}
 	if err != nil {
-		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
+		return types.BlobInfo{}, errors.Wrap(err, "writing blob")
 	}
 
 	uploadedInfo.Annotations = srcInfo.Annotations
@@ -1608,7 +1688,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
 		_, err := io.Copy(ioutil.Discard, originalLayerReader)
 		if err != nil {
-			return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
+			return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
 		}
 	}
 
@@ -1640,23 +1720,42 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 			c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName)
 		}
 	}
 
+	// Copy all the metadata generated by the compressor into the annotations.
+	if uploadedInfo.Annotations == nil {
+		uploadedInfo.Annotations = map[string]string{}
+	}
+	for k, v := range compressionMetadata {
+		uploadedInfo.Annotations[k] = v
+	}
+
 	return uploadedInfo, nil
 }
 
+// doCompression reads all input from src and writes its compressed equivalent to dest.
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compression.Algorithm, compressionLevel *int) error {
+	compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
+	if err != nil {
+		return err
+	}
+
+	buf := make([]byte, compressionBufferSize)
+
+	_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
+	if err != nil {
+		compressor.Close()
+		return err
+	}
+
+	return compressor.Close()
+}
+
 // compressGoroutine reads all input from src and writes its compressed equivalent to dest.
-func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) {
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compression.Algorithm) {
 	err := errors.New("Internal error: unexpected panic in compressGoroutine")
 	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
 		_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
 	}()
 
-	compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel)
-	if err != nil {
-		return
-	}
-	defer compressor.Close()
-
-	buf := make([]byte, compressionBufferSize)
-
-	_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
+	err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
 }
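The compression rework above threads a compressionMetadata map from copyBlobFromStream through compressGoroutine into doCompression (via CompressStreamWithMetadata), then merges whatever the compressor recorded into the uploaded blob's annotations. A small self-contained sketch of just the merge step; the annotation key shown is invented for the example.

package main

import "fmt"

// mergeCompressionMetadata mirrors the post-upload step in the hunk above:
// whatever metadata the compressor produced is folded into the blob's
// annotations, creating the map if the source had none.
func mergeCompressionMetadata(annotations, compressionMetadata map[string]string) map[string]string {
	if annotations == nil {
		annotations = map[string]string{}
	}
	for k, v := range compressionMetadata {
		annotations[k] = v
	}
	return annotations
}

func main() {
	merged := mergeCompressionMetadata(nil, map[string]string{
		"example.com/compression.metadata": "value", // hypothetical key
	})
	fmt.Println(merged)
}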
vendor/github.com/containers/image/v5/copy/manifest.go
@@ -45,7 +45,7 @@ func (os *orderedSet) append(s string) {
 func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
 	_, srcType, err := ic.src.Manifest(ctx)
 	if err != nil { // This should have been cached?!
-		return "", nil, errors.Wrap(err, "Error reading manifest")
+		return "", nil, errors.Wrap(err, "reading manifest")
 	}
 	normalizedSrcType := manifest.NormalizedMIMEType(srcType)
 	if srcType != normalizedSrcType {
@@ -137,30 +137,29 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
 	if forcedListMIMEType != "" {
 		destSupportedMIMETypes = []string{forcedListMIMEType}
 	}
-	var selectedType string
-	var otherSupportedTypes []string
-	for i := range destSupportedMIMETypes {
-		// The second priority is the first member of the list of acceptable types that is a list,
-		// but keep going in case current type occurs later in the list.
-		if selectedType == "" && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) {
-			selectedType = destSupportedMIMETypes[i]
-		}
-		// The first priority is the current type, if it's in the list, since that lets us avoid a
-		// conversion that isn't strictly necessary.
-		if destSupportedMIMETypes[i] == currentListMIMEType {
-			selectedType = destSupportedMIMETypes[i]
-		}
+	prioritizedTypes := newOrderedSet()
+	// The first priority is the current type, if it's in the list, since that lets us avoid a
+	// conversion that isn't strictly necessary.
+	for _, t := range destSupportedMIMETypes {
+		if t == currentListMIMEType {
+			prioritizedTypes.append(currentListMIMEType)
+			break
+		}
 	}
 	// Pick out the other list types that we support.
-	for i := range destSupportedMIMETypes {
-		if selectedType != destSupportedMIMETypes[i] && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) {
-			otherSupportedTypes = append(otherSupportedTypes, destSupportedMIMETypes[i])
+	for _, t := range destSupportedMIMETypes {
+		if manifest.MIMETypeIsMultiImage(t) {
+			prioritizedTypes.append(t)
 		}
 	}
 
 	logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
-	if selectedType == "" {
+	if len(prioritizedTypes.list) == 0 {
 		return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
 	}
+	selectedType := prioritizedTypes.list[0]
+	otherSupportedTypes := prioritizedTypes.list[1:]
 	if selectedType != currentListMIMEType {
 		logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes)
 	} else {
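The determineListConversion rewrite replaces two ad-hoc selection variables with an insertion-ordered set, making the preference order explicit: the current MIME type first (so no conversion happens when none is needed), then every other supported list type. A runnable sketch of that prioritization follows; the orderedSet here is a stand-in whose exact fields are assumed.

package main

import "fmt"

// orderedSet is a stand-in for the one defined next to this function:
// it keeps insertion order and silently drops duplicates.
type orderedSet struct {
	list     []string
	included map[string]bool
}

func newOrderedSet() *orderedSet { return &orderedSet{included: map[string]bool{}} }

func (os *orderedSet) append(s string) {
	if !os.included[s] {
		os.list = append(os.list, s)
		os.included[s] = true
	}
}

func main() {
	current := "application/vnd.oci.image.index.v1+json"
	supported := []string{"application/vnd.docker.distribution.manifest.list.v2+json", current}

	// Same priority order as the rewritten determineListConversion:
	// the current type first, then every other supported list type;
	// the duplicate append of the current type is absorbed by the set.
	prioritized := newOrderedSet()
	for _, t := range supported {
		if t == current {
			prioritized.append(t)
			break
		}
	}
	for _, t := range supported {
		prioritized.append(t) // the MIMETypeIsMultiImage filter is elided here
	}
	fmt.Println(prioritized.list)
}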
vendor/github.com/containers/image/v5/copy/progress_reader.go
@@ -1,9 +1,11 @@
 package copy
 
 import (
+	"context"
 	"io"
 	"time"
 
+	internalTypes "github.com/containers/image/v5/internal/types"
 	"github.com/containers/image/v5/types"
 )
 
@@ -77,3 +79,26 @@ func (r *progressReader) Read(p []byte) (int, error) {
 	}
 	return n, err
 }
+
+// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes
+// are received.
+type imageSourceSeekableProxy struct {
+	// source is the seekable input to read from.
+	source internalTypes.ImageSourceSeekable
+	// progress is the chan where the total number of bytes read so far are reported.
+	progress chan int64
+}
+
+// GetBlobAt reads from the ImageSourceSeekable and report how many bytes were received
+// to the progress chan.
+func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks)
+	if err == nil {
+		total := int64(0)
+		for _, c := range chunks {
+			total += int64(c.Length)
+		}
+		s.progress <- total
+	}
+	return rc, errs, err
+}
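imageSourceSeekableProxy exists purely for progress accounting: it forwards GetBlobAt to the wrapped source and, on success, reports the total requested bytes on the progress channel that copyLayer drains into its progress bar. A self-contained sketch of that accounting step; the chunk struct is a hypothetical stand-in for internalTypes.ImageSourceChunk.

package main

import "fmt"

// chunk is a hypothetical stand-in for internalTypes.ImageSourceChunk.
type chunk struct {
	Offset uint64
	Length uint64
}

// reportTotal mirrors the accounting in imageSourceSeekableProxy.GetBlobAt:
// sum the bytes requested across all chunks and push the total onto the
// progress channel that the copy loop drains into its progress bar.
func reportTotal(chunks []chunk, progress chan<- int64) {
	total := int64(0)
	for _, c := range chunks {
		total += int64(c.Length)
	}
	progress <- total
}

func main() {
	progress := make(chan int64, 1) // buffered so the send cannot block here
	reportTotal([]chunk{{Offset: 0, Length: 100}, {Offset: 200, Length: 50}}, progress)
	fmt.Println(<-progress) // 150
}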
vendor/github.com/containers/image/v5/copy/sign.go
@@ -10,7 +10,7 @@ import (
 func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) {
 	mech, err := signature.NewGPGSigningMechanism()
 	if err != nil {
-		return nil, errors.Wrap(err, "Error initializing GPG")
+		return nil, errors.Wrap(err, "initializing GPG")
 	}
 	defer mech.Close()
 	if err := mech.SupportsSigning(); err != nil {
@@ -25,7 +25,7 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, e
 	c.Printf("Signing manifest\n")
 	newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
 	if err != nil {
-		return nil, errors.Wrap(err, "Error creating signature")
+		return nil, errors.Wrap(err, "creating signature")
 	}
 	return newSig, nil
 }
vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -34,7 +34,7 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
 	// if the contents don't match throw an error
 	dirExists, err := pathExists(d.ref.resolvedPath)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath)
+		return nil, errors.Wrapf(err, "checking for path %q", d.ref.resolvedPath)
 	}
 	if dirExists {
 		isEmpty, err := isDirEmpty(d.ref.resolvedPath)
@@ -45,7 +45,7 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
 		if !isEmpty {
 			versionExists, err := pathExists(d.ref.versionPath())
 			if err != nil {
-				return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath())
+				return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
 			}
 			if versionExists {
 				contents, err := ioutil.ReadFile(d.ref.versionPath())
@@ -61,7 +61,7 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
 			}
 			// delete directory contents so that only one image is in the directory at a time
 			if err = removeDirContents(d.ref.resolvedPath); err != nil {
-				return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath)
+				return nil, errors.Wrapf(err, "erasing contents in %q", d.ref.resolvedPath)
 			}
 			logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
 		}
@@ -74,7 +74,7 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
 	// create version file
 	err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
+		return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
 	}
 	return d, nil
 }
@@ -239,6 +239,9 @@ func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]
 }
 
 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
 // WARNING: This does not have any transactional semantics:
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
vendor/github.com/containers/image/v5/docker/archive/dest.go
@@ -67,6 +67,9 @@ func (d *archiveImageDestination) Close() error {
 }
 
 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
 // WARNING: This does not have any transactional semantics:
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
vendor/github.com/containers/image/v5/docker/archive/reader.go
@@ -81,14 +81,14 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
 			}
 			ref, err := newReference(r.path, nt, -1, r.archive, nil)
 			if err != nil {
-				return nil, errors.Wrapf(err, "Error creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
+				return nil, errors.Wrapf(err, "creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
 			}
 			refs = append(refs, ref)
 		}
 		if len(refs) == 0 {
 			ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
 			if err != nil {
-				return nil, errors.Wrapf(err, "Error creating a reference for manifest item @%d", imageIndex)
+				return nil, errors.Wrapf(err, "creating a reference for manifest item @%d", imageIndex)
 			}
 			refs = append(refs, ref)
 		}
vendor/github.com/containers/image/v5/docker/archive/writer.go
@@ -60,7 +60,7 @@ func openArchiveForWriting(path string) (*os.File, error) {
 	// only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
 	fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error opening file %q", path)
+		return nil, errors.Wrapf(err, "opening file %q", path)
 	}
 	succeeded := false
 	defer func() {
@@ -70,7 +70,7 @@ func openArchiveForWriting(path string) (*os.File, error) {
 	}()
 	fhStat, err := fh.Stat()
 	if err != nil {
-		return nil, errors.Wrapf(err, "error statting file %q", path)
+		return nil, errors.Wrapf(err, "statting file %q", path)
 	}
 
 	if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -42,7 +42,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
 
 	c, err := newDockerClient(sys)
 	if err != nil {
-		return nil, errors.Wrap(err, "Error initializing docker engine client")
+		return nil, errors.Wrap(err, "initializing docker engine client")
 	}
 
 	reader, writer := io.Pipe()
@@ -84,7 +84,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
 
 	resp, err := c.ImageLoad(ctx, reader, true)
 	if err != nil {
-		err = errors.Wrap(err, "Error saving image to docker engine")
+		err = errors.Wrap(err, "saving image to docker engine")
 		return
 	}
 	defer resp.Body.Close()
@@ -128,6 +128,9 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
 }
 
 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
 // WARNING: This does not have any transactional semantics:
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -25,13 +25,13 @@ type daemonImageSource struct {
 func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
 	c, err := newDockerClient(sys)
 	if err != nil {
-		return nil, errors.Wrap(err, "Error initializing docker engine client")
+		return nil, errors.Wrap(err, "initializing docker engine client")
 	}
 	// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
 	// Either way ImageSave should create a tarball with exactly one image.
 	inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
 	if err != nil {
-		return nil, errors.Wrap(err, "Error loading image from docker engine")
+		return nil, errors.Wrap(err, "loading image from docker engine")
 	}
 	defer inputStream.Close()
 
vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -92,7 +92,7 @@ type bearerToken struct {
 	expirationTime time.Time
 }
 
-// dockerClient is configuration for dealing with a single Docker registry.
+// dockerClient is configuration for dealing with a single container registry.
 type dockerClient struct {
 	// The following members are set by newDockerClient and do not change afterwards.
 	sys *types.SystemContext
@@ -213,10 +213,9 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
 // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
 // signatureBase is always set in the return value
 func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
-	registry := reference.Domain(ref.ref)
-	auth, err := config.GetCredentials(sys, registry)
+	auth, err := config.GetCredentialsForRef(sys, ref.ref)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error getting username and password")
+		return nil, errors.Wrapf(err, "getting username and password")
 	}
 
 	sigBase, err := SignatureStorageBaseURL(sys, ref, write)
@@ -224,6 +223,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
 		return nil, err
 	}
 
+	registry := reference.Domain(ref.ref)
 	client, err := newDockerClient(sys, registry, ref.ref.Name())
 	if err != nil {
 		return nil, err
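Switching from GetCredentials(registry) to GetCredentialsForRef(ref) lets credential lookup see the full repository reference rather than only the registry host, so more specific authfile entries can take precedence. A toy illustration of most-specific-match lookup under that assumption; the map and its keys are invented for the example.

package main

import (
	"fmt"
	"strings"
)

// mostSpecificAuth sketches why passing the full reference matters:
// credentials may be stored per registry, per namespace, or per repository,
// and the longest matching prefix should win.
func mostSpecificAuth(auths map[string]string, repo string) string {
	key := repo
	for {
		if a, ok := auths[key]; ok {
			return a
		}
		i := strings.LastIndex(key, "/")
		if i < 0 {
			return "" // no credentials configured at any level
		}
		key = key[:i] // drop one path component and retry
	}
}

func main() {
	auths := map[string]string{
		"quay.io":         "registry-wide-token",
		"quay.io/project": "namespace-token",
	}
	fmt.Println(mostSpecificAuth(auths, "quay.io/project/image")) // namespace-token
}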
@@ -269,7 +269,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
 	skipVerify := false
 	reg, err := sysregistriesv2.FindRegistry(sys, reference)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error loading registries")
+		return nil, errors.Wrapf(err, "loading registries")
 	}
 	if reg != nil {
 		if reg.Blocked {
@@ -297,7 +297,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
 func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
 	client, err := newDockerClient(sys, registry, registry)
 	if err != nil {
-		return errors.Wrapf(err, "error creating new docker client")
+		return errors.Wrapf(err, "creating new docker client")
 	}
 	client.auth = types.DockerAuthConfig{
 		Username: username,
@@ -343,9 +343,10 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
 	v1Res := &V1Results{}
 
 	// Get credentials from authfile for the underlying hostname
-	auth, err := config.GetCredentials(sys, registry)
+	// lint:ignore SA1019 We can't use GetCredentialsForRef because we want to search the whole registry.
+	auth, err := config.GetCredentials(sys, registry) // nolint:staticcheck // https://github.com/golangci/golangci-lint/issues/741
 	if err != nil {
-		return nil, errors.Wrapf(err, "error getting username and password")
+		return nil, errors.Wrapf(err, "getting username and password")
 	}
 
 	// The /v2/_catalog endpoint has been disabled for docker.io therefore
@@ -359,7 +360,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
 
 	client, err := newDockerClient(sys, hostname, registry)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating new docker client")
+		return nil, errors.Wrapf(err, "creating new docker client")
 	}
 	client.auth = auth
 	if sys != nil {
@@ -422,7 +423,14 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
 			res := SearchResult{
 				Name: repo,
 			}
-			searchRes = append(searchRes, res)
+			// bugzilla.redhat.com/show_bug.cgi?id=1976283
+			// If we have a full match, make sure it's listed as the first result.
+			// (Note there might be a full match we never see if we reach the result limit first.)
+			if repo == image {
+				searchRes = append([]SearchResult{res}, searchRes...)
+			} else {
+				searchRes = append(searchRes, res)
+			}
 		}
 	}
 
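The search change above (tracking bugzilla.redhat.com/show_bug.cgi?id=1976283) prepends exact repository matches instead of appending them, so a full match sorts first in the results the caller sees. A minimal sketch of that reordering:

package main

import "fmt"

// prioritizeExact mirrors the SearchRegistry fix: an exact repository match
// is moved to the front of the accumulated results instead of appearing
// wherever the registry happened to return it.
func prioritizeExact(results []string, query string) []string {
	var out []string
	for _, r := range results {
		if r == query {
			out = append([]string{r}, out...) // prepend the full match
		} else {
			out = append(out, r)
		}
	}
	return out
}

func main() {
	fmt.Println(prioritizeExact([]string{"foo/nginx", "nginx", "bar/nginx"}, "nginx"))
}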
@@ -751,7 +759,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
 		err = ping("http")
 	}
 	if err != nil {
-		err = errors.Wrapf(err, "error pinging docker registry %s", c.registry)
+		err = errors.Wrapf(err, "pinging container registry %s", c.registry)
 		if c.sys != nil && c.sys.DockerDisableV1Ping {
 			return err
 		}
@@ -799,7 +807,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
 	defer res.Body.Close()
 
 	if res.StatusCode != http.StatusOK {
-		return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
+		return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
 	}
 
 	body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
@@ -809,7 +817,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
 
 	var parsedBody extensionSignatureList
 	if err := json.Unmarshal(body, &parsedBody); err != nil {
-		return nil, errors.Wrapf(err, "Error decoding signature list")
+		return nil, errors.Wrapf(err, "decoding signature list")
 	}
 	return &parsedBody, nil
 }
vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -73,7 +73,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
 		return nil, err
 	}
 	defer res.Body.Close()
-	if err := httpResponseToError(res, "Error fetching tags list"); err != nil {
+	if err := httpResponseToError(res, "fetching tags list"); err != nil {
 		return nil, err
 	}
 
@@ -141,7 +141,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
 
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusOK {
-		return "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading digest %s in %s", tagOrDigest, dr.ref.Name())
+		return "", errors.Wrapf(registryHTTPResponseToError(res), "reading digest %s in %s", tagOrDigest, dr.ref.Name())
 	}
 
 	dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest"))
vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -154,11 +154,11 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusAccepted {
 		logrus.Debugf("Error initiating layer upload, response %#v", *res)
-		return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
+		return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "initiating layer upload to %s in %s", uploadPath, d.c.registry)
 	}
 	uploadLocation, err := res.Location()
 	if err != nil {
-		return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
+		return types.BlobInfo{}, errors.Wrap(err, "determining upload URL")
 	}
 
 	digester := digest.Canonical.Digester()
@@ -175,11 +175,11 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 		}
 		defer res.Body.Close()
 		if !successStatus(res.StatusCode) {
-			return nil, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer chunked")
+			return nil, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer chunked")
 		}
 		uploadLocation, err := res.Location()
 		if err != nil {
-			return nil, errors.Wrap(err, "Error determining upload URL")
+			return nil, errors.Wrap(err, "determining upload URL")
 		}
 		return uploadLocation, nil
 	}()
@@ -201,7 +201,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusCreated {
 		logrus.Debugf("Error uploading layer, response %#v", *res)
-		return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer to %s", uploadLocation)
+		return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation)
 	}
 
 	logrus.Debugf("Upload of layer %s complete", computedDigest)
@@ -226,7 +226,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
 		return true, getBlobSize(res), nil
 	case http.StatusUnauthorized:
 		logrus.Debugf("... not authorized")
-		return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
+		return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "checking whether a blob %s exists in %s", digest, repo.Name())
 	case http.StatusNotFound:
 		logrus.Debugf("... not present")
 		return false, -1, nil
@@ -261,7 +261,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 		// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
 		uploadLocation, err := res.Location()
 		if err != nil {
-			return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+			return errors.Wrap(err, "determining upload URL after a mount attempt")
 		}
 		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
 		res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
@@ -277,7 +277,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 		return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
 	default:
 		logrus.Debugf("Error mounting, response %#v", *res)
-		return errors.Wrapf(registryHTTPResponseToError(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+		return errors.Wrapf(registryHTTPResponseToError(res), "mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
 	}
 }
 
@@ -392,7 +392,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
 	// Double-check that the manifest we've been given matches the digest we've been given.
 	matches, err := manifest.MatchesDigest(m, *instanceDigest)
 	if err != nil {
-		return errors.Wrapf(err, "error digesting manifest in PutManifest")
+		return errors.Wrapf(err, "digesting manifest in PutManifest")
 	}
 	if !matches {
 		manifestDigest, merr := manifest.Digest(m)
@@ -430,7 +430,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
 	}
 	defer res.Body.Close()
 	if !successStatus(res.StatusCode) {
-		err = errors.Wrapf(registryHTTPResponseToError(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
+		err = errors.Wrapf(registryHTTPResponseToError(res), "uploading manifest %s to %s", refTail, d.ref.ref.Name())
 		if isManifestInvalidError(errors.Cause(err)) {
 			err = types.ManifestTypeRejectedError{Err: err}
 		}
@@ -621,7 +621,7 @@ sigExists:
 		randBytes := make([]byte, 16)
 		n, err := rand.Read(randBytes)
 		if err != nil || n != 16 {
-			return errors.Wrapf(err, "Error generating random signature len %d", n)
+			return errors.Wrapf(err, "generating random signature len %d", n)
 		}
 		signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
 		if _, ok := existingSigNames[signatureName]; !ok {
@@ -651,7 +651,7 @@ sigExists:
 			logrus.Debugf("Error body %s", string(body))
 		}
 		logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
-		return errors.Wrapf(registryHTTPResponseToError(res), "Error uploading signature to %s in %s", path, d.c.registry)
+		return errors.Wrapf(registryHTTPResponseToError(res), "uploading signature to %s in %s", path, d.c.registry)
 	}
 }
 
@@ -659,6 +659,9 @@ sigExists:
 }
 
 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
 // WARNING: This does not have any transactional semantics:
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -6,14 +6,17 @@ import (
 	"io"
 	"io/ioutil"
 	"mime"
+	"mime/multipart"
 	"net/http"
 	"net/url"
 	"os"
 	"strconv"
 	"strings"
+	"sync"
 
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/internal/iolimits"
+	internalTypes "github.com/containers/image/v5/internal/types"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/types"
@@ -36,7 +39,7 @@ type dockerImageSource struct {
 func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
 	registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
 	if err != nil {
-		return nil, errors.Wrapf(err, "error loading registries configuration")
+		return nil, errors.Wrapf(err, "loading registries configuration")
 	}
 	if registry == nil {
 		// No configuration was found for the provided reference, so use the
@@ -69,7 +72,6 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 		} else {
 			logrus.Debugf("Trying to access %q", pullSource.Reference)
 		}
-		logrus.Debugf("Trying to access %q", pullSource.Reference)
 		s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
 		if err == nil {
 			return s, nil
@@ -197,7 +199,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
 	logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusOK {
-		return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
+		return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
 	}
 
 	manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
@ -276,6 +278,82 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetBlobAt returns a stream for the specified blob.
|
||||||
|
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
|
||||||
|
headers := make(map[string][]string)
|
||||||
|
|
||||||
|
var rangeVals []string
|
||||||
|
for _, c := range chunks {
|
||||||
|
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
|
||||||
|
}
|
||||||
|
|
||||||
|
headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))}
|
||||||
|
|
||||||
|
if len(info.URLs) != 0 {
|
||||||
|
return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt")
|
||||||
|
}
|
||||||
|
|
||||||
|
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
|
||||||
|
logrus.Debugf("Downloading %s", path)
|
||||||
|
res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if err := httpResponseToError(res, "Error fetching partial blob"); err != nil {
|
||||||
|
if res.Body != nil {
|
||||||
|
res.Body.Close()
|
||||||
|
}
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if res.StatusCode != http.StatusPartialContent {
|
||||||
|
res.Body.Close()
|
||||||
|
return nil, nil, errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
|
||||||
|
}
|
||||||
|
|
||||||
|
mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
streams := make(chan io.ReadCloser)
|
||||||
|
errs := make(chan error)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer close(streams)
|
||||||
|
defer close(errs)
|
||||||
|
if !strings.HasPrefix(mediaType, "multipart/") {
|
||||||
|
streams <- res.Body
|
||||||
|
return
|
||||||
|
}
|
||||||
|
boundary, found := params["boundary"]
|
||||||
|
if !found {
|
||||||
|
errs <- errors.Errorf("could not find boundary")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
buffered := makeBufferedNetworkReader(res.Body, 64, 16384)
|
||||||
|
defer buffered.Close()
|
||||||
|
mr := multipart.NewReader(buffered, boundary)
|
||||||
|
for {
|
||||||
|
p, err := mr.NextPart()
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
errs <- err
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s := signalCloseReader{
|
||||||
|
Closed: make(chan interface{}),
|
||||||
|
Stream: p,
|
||||||
|
}
|
||||||
|
streams <- s
|
||||||
|
// NextPart() cannot be called while the current part
|
||||||
|
// is being read, so wait until it is closed
|
||||||
|
<-s.Closed
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return streams, errs, nil
|
||||||
|
}
|
||||||
|
|
||||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||||
|
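For illustration (not part of the upstream change): GetBlobAt hands back one io.ReadCloser per requested chunk on `streams` and reports failures on `errs`; both channels are closed together when the producer goroutine exits. A minimal consumer might look like the sketch below; the function name, the `src` value and the chunk size are hypothetical, and imports of context, io, io/ioutil and the internal types package are assumed.

    // consumeChunks drains both channels returned by GetBlobAt. Selecting over
    // the two channels until each is closed avoids deadlocking against an
    // error sent while a stream is still unread.
    func consumeChunks(ctx context.Context, src internalTypes.ImageSourceSeekable, info types.BlobInfo) error {
        // One 1 KiB chunk; GetBlobAt encodes this as the header "Range: bytes=0-1023".
        chunks := []internalTypes.ImageSourceChunk{{Offset: 0, Length: 1024}}
        streams, errs, err := src.GetBlobAt(ctx, info, chunks)
        if err != nil {
            return err
        }
        for streams != nil || errs != nil {
            select {
            case s, ok := <-streams:
                if !ok {
                    streams = nil
                    continue
                }
                _, copyErr := io.Copy(ioutil.Discard, s)
                s.Close() // each part must be closed before the next one is produced
                if copyErr != nil {
                    return copyErr
                }
            case e, ok := <-errs:
                if !ok {
                    errs = nil
                    continue
                }
                return e
            }
        }
        return nil
    }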
@@ -499,3 +577,119 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 
 	return nil
 }
+
+type bufferedNetworkReaderBuffer struct {
+	data     []byte
+	len      int
+	consumed int
+	err      error
+}
+
+type bufferedNetworkReader struct {
+	stream      io.Reader
+	emptyBuffer chan *bufferedNetworkReaderBuffer
+	readyBuffer chan *bufferedNetworkReaderBuffer
+	terminate   chan bool
+	current     *bufferedNetworkReaderBuffer
+	mutex       sync.Mutex
+	gotEOF      bool
+}
+
+// handleBufferedNetworkReader runs in a goroutine
+func handleBufferedNetworkReader(br *bufferedNetworkReader) {
+	defer close(br.readyBuffer)
+	for {
+		select {
+		case b := <-br.emptyBuffer:
+			b.len, b.err = br.stream.Read(b.data)
+			br.readyBuffer <- b
+			if b.err != nil {
+				return
+			}
+		case <-br.terminate:
+			return
+		}
+	}
+}
+
+func (n *bufferedNetworkReader) Close() {
+	close(n.terminate)
+	close(n.emptyBuffer)
+}
+
+func (n *bufferedNetworkReader) read(p []byte) (int, error) {
+	if n.current != nil {
+		copied := copy(p, n.current.data[n.current.consumed:n.current.len])
+		n.current.consumed += copied
+		if n.current.consumed == n.current.len {
+			n.emptyBuffer <- n.current
+			n.current = nil
+		}
+		if copied > 0 {
+			return copied, nil
+		}
+	}
+	if n.gotEOF {
+		return 0, io.EOF
+	}
+
+	var b *bufferedNetworkReaderBuffer
+
+	select {
+	case b = <-n.readyBuffer:
+		if b.err != nil {
+			if b.err != io.EOF {
+				return b.len, b.err
+			}
+			n.gotEOF = true
+		}
+		b.consumed = 0
+		n.current = b
+		return n.read(p)
+	case <-n.terminate:
+		return 0, io.EOF
+	}
+}
+
+func (n *bufferedNetworkReader) Read(p []byte) (int, error) {
+	n.mutex.Lock()
+	defer n.mutex.Unlock()
+
+	return n.read(p)
+}
+
+func makeBufferedNetworkReader(stream io.Reader, nBuffers, bufferSize uint) *bufferedNetworkReader {
+	br := bufferedNetworkReader{
+		stream:      stream,
+		emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
+		readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
+		terminate:   make(chan bool),
+	}
+
+	go func() {
+		handleBufferedNetworkReader(&br)
+	}()
+
+	for i := uint(0); i < nBuffers; i++ {
+		b := bufferedNetworkReaderBuffer{
+			data: make([]byte, bufferSize),
+		}
+		br.emptyBuffer <- &b
+	}
+
+	return &br
+}
+
+type signalCloseReader struct {
+	Closed chan interface{}
+	Stream io.ReadCloser
+}
+
+func (s signalCloseReader) Read(p []byte) (int, error) {
+	return s.Stream.Read(p)
+}
+
+func (s signalCloseReader) Close() error {
+	defer close(s.Closed)
+	return s.Stream.Close()
+}
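makeBufferedNetworkReader decouples the multipart parser from the network: a dedicated goroutine keeps up to nBuffers byte slices filled ahead of the consumer, and Read serves data from those buffers under a mutex. For illustration, a minimal caller might look like this (hypothetical; GetBlobAt above uses the same 64 x 16384 sizing):

    func drainBuffered(body io.Reader) error {
        br := makeBufferedNetworkReader(body, 64, 16384) // 64 buffers of 16 KiB each
        defer br.Close()
        // Reads are served from pre-filled buffers while the background
        // goroutine keeps refilling them from the underlying stream.
        _, err := io.Copy(ioutil.Discard, br)
        return err
    }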
@@ -16,7 +16,7 @@ func init() {
 	transports.Register(Transport)
 }
 
-// Transport is an ImageTransport for Docker registry-hosted images.
+// Transport is an ImageTransport for container registry-hosted images.
 var Transport = dockerTransport{}
 
 type dockerTransport struct{}

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"net/http"
 
+	internalTypes "github.com/containers/image/v5/internal/types"
 	"github.com/docker/distribution/registry/client"
 	perrors "github.com/pkg/errors"
 )

@@ -12,7 +13,7 @@ import (
 var (
 	// ErrV1NotSupported is returned when we're trying to talk to a
 	// docker V1 registry.
-	ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
+	ErrV1NotSupported = errors.New("can't talk to a V1 container registry")
 	// ErrTooManyRequests is returned when the status code returned is 429
 	ErrTooManyRequests = errors.New("too many requests to registry")
 )

@@ -32,11 +33,15 @@ func httpResponseToError(res *http.Response, context string) error {
 	switch res.StatusCode {
 	case http.StatusOK:
 		return nil
+	case http.StatusPartialContent:
+		return nil
 	case http.StatusTooManyRequests:
 		return ErrTooManyRequests
 	case http.StatusUnauthorized:
 		err := client.HandleErrorResponse(res)
 		return ErrUnauthorizedForCredentials{Err: err}
+	case http.StatusBadRequest:
+		return internalTypes.BadPartialRequestError{Status: res.Status}
 	default:
 		if context != "" {
 			context = context + ": "
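The two new cases change the caller-side contract: a 206 Partial Content response now counts as success, and a 400 on a Range request surfaces as the typed internalTypes.BadPartialRequestError so a caller can detect it and fall back to a full fetch. A hypothetical sketch of that check (the fallback message and function name are illustrative):

    func checkPartialResponse(res *http.Response) error {
        err := httpResponseToError(res, "Error fetching partial blob")
        if _, ok := err.(internalTypes.BadPartialRequestError); ok {
            // The registry rejected the Range request; retry with a plain GET.
            return fmt.Errorf("partial request rejected, fall back to a full blob fetch: %v", err)
        }
        return err
    }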
@@ -140,11 +140,11 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 	if isConfig {
 		buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
 		if err != nil {
-			return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
+			return types.BlobInfo{}, errors.Wrap(err, "reading Config file stream")
 		}
 		d.config = buf
 		if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
-			return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
+			return types.BlobInfo{}, errors.Wrap(err, "writing Config file")
 		}
 	} else {
 		if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {

@@ -187,7 +187,7 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest
 	// so the caller trying a different manifest kind would be pointless.
 	var man manifest.Schema2
 	if err := json.Unmarshal(m, &man); err != nil {
-		return errors.Wrap(err, "Error parsing manifest")
+		return errors.Wrap(err, "parsing manifest")
 	}
 	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
 		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")

@@ -30,7 +30,7 @@ type Reader struct {
 func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
 	file, err := os.Open(path)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error opening file %q", path)
+		return nil, errors.Wrapf(err, "opening file %q", path)
 	}
 	defer file.Close()
 

@@ -38,7 +38,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
 	// as a source. Otherwise we pass the stream to NewReaderFromStream.
 	stream, isCompressed, err := compression.AutoDecompress(file)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
+		return nil, errors.Wrapf(err, "detecting compression for file %q", path)
 	}
 	defer stream.Close()
 	if !isCompressed {

@@ -55,7 +55,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
 	// Save inputStream to a temporary file
 	tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating temporary file")
+		return nil, errors.Wrap(err, "creating temporary file")
 	}
 	defer tarCopyFile.Close()
 

@@ -71,7 +71,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
 	// giving users really confusing "invalid tar header" errors).
 	uncompressedStream, _, err := compression.AutoDecompress(inputStream)
 	if err != nil {
-		return nil, errors.Wrap(err, "Error auto-decompressing input")
+		return nil, errors.Wrap(err, "auto-decompressing input")
 	}
 	defer uncompressedStream.Close()
 

@@ -80,7 +80,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
 	// TODO: This can take quite some time, and should ideally be cancellable
 	// using a context.Context.
 	if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
-		return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
+		return nil, errors.Wrapf(err, "copying contents to temporary file %q", tarCopyFile.Name())
 	}
 	succeeded = true
 

@@ -113,7 +113,7 @@ func newReader(path string, removeOnClose bool) (*Reader, error) {
 		return nil, err
 	}
 	if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
-		return nil, errors.Wrap(err, "Error decoding tar manifest.json")
+		return nil, errors.Wrap(err, "decoding tar manifest.json")
 	}
 
 	succeeded = true

@@ -258,7 +258,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *
 func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
 	file, err := r.openTarComponent(path)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error loading tar component %s", path)
+		return nil, errors.Wrapf(err, "loading tar component %s", path)
 	}
 	defer file.Close()
 	bytes, err := iolimits.ReadAtMost(file, limit)

@@ -80,7 +80,7 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
 	}
 	var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
 	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
-		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
+		return errors.Wrapf(err, "decoding tar config %s", tarManifest.Config)
 	}
 	if parsedConfig.RootFS == nil {
 		return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)

@@ -164,7 +164,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
 	// the slower method of checking if it's compressed.
 	uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", layerPath)
+		return nil, errors.Wrapf(err, "auto-decompressing %s to determine its size", layerPath)
 	}
 	defer uncompressedStream.Close()
 

@@ -172,7 +172,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
 	if isCompressed {
 		uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
 		if err != nil {
-			return nil, errors.Wrapf(err, "Error reading %s to find its size", layerPath)
+			return nil, errors.Wrapf(err, "reading %s to find its size", layerPath)
 		}
 	}
 	li.size = uncompressedSize

@@ -292,7 +292,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
 
 		uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
 		if err != nil {
-			return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
+			return nil, 0, errors.Wrapf(err, "auto-decompressing blob %s", info.Digest)
 		}
 
 		newStream := uncompressedReadCloser{

@@ -94,16 +94,16 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
 		// See also the comment in physicalLayerPath.
 		physicalLayerPath := w.physicalLayerPath(layerDigest)
 		if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
-			return errors.Wrap(err, "Error creating layer symbolic link")
+			return errors.Wrap(err, "creating layer symbolic link")
 		}
 
 		b := []byte("1.0")
 		if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
-			return errors.Wrap(err, "Error writing VERSION file")
+			return errors.Wrap(err, "writing VERSION file")
 		}
 
 		if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
-			return errors.Wrap(err, "Error writing config json file")
+			return errors.Wrap(err, "writing config json file")
 		}
 
 		w.legacyLayers[layerID] = struct{}{}

@@ -128,7 +128,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
 		var config map[string]*json.RawMessage
 		err := json.Unmarshal(configBytes, &config)
 		if err != nil {
-			return errors.Wrap(err, "Error unmarshaling config")
+			return errors.Wrap(err, "unmarshaling config")
 		}
 		for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
 			layerConfig[attr] = config[attr]

@@ -152,7 +152,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
 		layerConfig["layer_id"] = chainID
 		b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
 		if err != nil {
-			return errors.Wrap(err, "Error marshaling layer config")
+			return errors.Wrap(err, "marshaling layer config")
 		}
 		delete(layerConfig, "layer_id")
 		layerID := digest.Canonical.FromBytes(b).Hex()

@@ -160,7 +160,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
 
 		configBytes, err := json.Marshal(layerConfig)
 		if err != nil {
-			return errors.Wrap(err, "Error marshaling layer config")
+			return errors.Wrap(err, "marshaling layer config")
 		}
 
 		if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {

@@ -280,10 +280,10 @@ func (w *Writer) Close() error {
 
 	b, err = json.Marshal(w.repositories)
 	if err != nil {
-		return errors.Wrap(err, "Error marshaling repositories")
+		return errors.Wrap(err, "marshaling repositories")
 	}
 	if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
-		return errors.Wrap(err, "Error writing config json file")
+		return errors.Wrap(err, "writing config json file")
 	}
 
 	if err := w.tar.Close(); err != nil {

@@ -154,7 +154,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 		var config registryConfiguration
 		err = yaml.Unmarshal(configBytes, &config)
 		if err != nil {
-			return nil, errors.Wrapf(err, "Error parsing %s", configPath)
+			return nil, errors.Wrapf(err, "parsing %s", configPath)
 		}
 
 		if config.DefaultDocker != nil {

@@ -11,20 +11,20 @@ import (
 func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
 	list, err := manifest.Schema2ListFromManifest(manblob)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error parsing schema2 manifest list")
+		return nil, errors.Wrapf(err, "parsing schema2 manifest list")
 	}
 	targetManifestDigest, err := list.ChooseInstance(sys)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error choosing image instance")
+		return nil, errors.Wrapf(err, "choosing image instance")
 	}
 	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error loading manifest for target platform")
+		return nil, errors.Wrapf(err, "loading manifest for target platform")
 	}
 
 	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
 	if err != nil {
-		return nil, errors.Wrap(err, "Error computing manifest digest")
+		return nil, errors.Wrap(err, "computing manifest digest")
 	}
 	if !matches {
 		return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)

@@ -289,7 +289,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options
 		// and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
 		info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
 		if err != nil {
-			return nil, errors.Wrap(err, "Error uploading empty layer")
+			return nil, errors.Wrap(err, "uploading empty layer")
 		}
 		if info.Digest != emptyLayerBlobInfo.Digest {
 			return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)

@@ -11,20 +11,20 @@ import (
 func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
 	index, err := manifest.OCI1IndexFromManifest(manblob)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error parsing OCI1 index")
+		return nil, errors.Wrapf(err, "parsing OCI1 index")
 	}
 	targetManifestDigest, err := index.ChooseInstance(sys)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error choosing image instance")
+		return nil, errors.Wrapf(err, "choosing image instance")
 	}
 	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error loading manifest for target platform")
+		return nil, errors.Wrapf(err, "loading manifest for target platform")
 	}
 
 	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
 	if err != nil {
-		return nil, errors.Wrap(err, "Error computing manifest digest")
+		return nil, errors.Wrap(err, "computing manifest digest")
 	}
 	if !matches {
 		return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)

@@ -53,7 +53,7 @@ func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
 		if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
 			matches, err := manifest.MatchesDigest(m, digest)
 			if err != nil {
-				return nil, "", errors.Wrap(err, "Error computing manifest digest")
+				return nil, "", errors.Wrap(err, "computing manifest digest")
 			}
 			if !matches {
 				return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
@@ -58,3 +58,33 @@ type TryReusingBlobOptions struct {
 	// The reference of the image that contains the target blob.
 	SrcRef reference.Named
 }
+
+// ImageSourceChunk is a portion of a blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceChunk struct {
+	Offset uint64
+	Length uint64
+}
+
+// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceSeekable interface {
+	// GetBlobAt returns a stream for the specified blob.
+	GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
+
+// ImageDestinationPartial is a service to store a blob by requesting the missing chunks to a ImageSourceSeekable.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageDestinationPartial interface {
+	// PutBlobPartial writes contents of stream and returns data representing the result.
+	PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo publicTypes.BlobInfo, cache publicTypes.BlobInfoCache) (publicTypes.BlobInfo, error)
+}
+
+// BadPartialRequestError is returned by ImageSourceSeekable.GetBlobAt on an invalid request.
+type BadPartialRequestError struct {
+	Status string
+}
+
+func (e BadPartialRequestError) Error() string {
+	return e.Status
+}
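These experimental interfaces are the contract behind partial pulls: the docker transport's GetBlobAt (earlier in this diff) is an ImageSourceSeekable producer, and chunked-aware destinations implement ImageDestinationPartial. For a shape reference only, a toy in-package, in-memory implementation might look like the following (entirely hypothetical; assumes imports of bytes, context, io and io/ioutil alongside the publicTypes alias used above):

    type memorySeekable struct {
        blob []byte
    }

    // GetBlobAt serves each requested chunk from an in-memory slice, closing
    // both channels when done, and reports out-of-range requests the same way
    // a registry 400 would surface: as a BadPartialRequestError.
    func (m *memorySeekable) GetBlobAt(ctx context.Context, info publicTypes.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
        streams := make(chan io.ReadCloser)
        errs := make(chan error)
        go func() {
            defer close(streams)
            defer close(errs)
            for _, c := range chunks {
                end := c.Offset + c.Length
                if end > uint64(len(m.blob)) {
                    errs <- BadPartialRequestError{Status: "requested range out of bounds"}
                    return
                }
                streams <- ioutil.NopCloser(bytes.NewReader(m.blob[c.Offset:end]))
            }
        }()
        return streams, errs, nil
    }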
@@ -68,7 +68,7 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
 		if mt == mimeType { // Found the variant
 			name := mtsUncompressed
 			if algorithm != nil {
-				name = algorithm.Name()
+				name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
 			}
 			if res, ok := variants[name]; ok {
 				if res != mtsUnsupportedMIMEType {

@@ -109,7 +109,7 @@ func (m *Schema1) initialize() error {
 	m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
 	for i, h := range m.History {
 		if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
-			return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i)
+			return errors.Wrapf(err, "parsing v2s1 history entry %d", i)
 		}
 	}
 	return nil

@@ -242,14 +242,14 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
 	config := []byte(m.History[0].V1Compatibility)
 	err := json.Unmarshal(config, &s1)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error decoding configuration")
+		return nil, errors.Wrapf(err, "decoding configuration")
 	}
 	// Images created with versions prior to 1.8.3 require us to re-encode the encoded object,
 	// adding some fields that aren't "omitempty".
 	if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") {
 		config, err = json.Marshal(&s1)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1)
+			return nil, errors.Wrapf(err, "re-encoding compat image config %#v", s1)
 		}
 	}
 	// Build the history.

@@ -276,7 +276,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
 	raw := make(map[string]*json.RawMessage)
 	err = json.Unmarshal(config, &raw)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1)
+		return nil, errors.Wrapf(err, "re-decoding compat image config %#v", s1)
 	}
 	// Drop some fields.
 	delete(raw, "id")

@@ -242,7 +242,7 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 		}
 		mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
 		if err != nil {
-			return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
+			return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest)
 		}
 		m.LayersDescriptors[i].MediaType = mimeType
 		m.LayersDescriptors[i].Digest = info.Digest

@@ -91,7 +91,7 @@ func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
 func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
 	wantedPlatforms, err := platform.WantedPlatforms(ctx)
 	if err != nil {
-		return "", errors.Wrapf(err, "error getting platform information %#v", ctx)
+		return "", errors.Wrapf(err, "getting platform information %#v", ctx)
 	}
 	for _, wantedPlatform := range wantedPlatforms {
 		for _, d := range list.Manifests {

@@ -115,7 +115,7 @@ func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest
 func (list *Schema2List) Serialize() ([]byte, error) {
 	buf, err := json.Marshal(list)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error marshaling Schema2List %#v", list)
+		return nil, errors.Wrapf(err, "marshaling Schema2List %#v", list)
 	}
 	return buf, nil
 }

@@ -190,7 +190,7 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
 		Manifests: []Schema2ManifestDescriptor{},
 	}
 	if err := json.Unmarshal(manifest, &list); err != nil {
-		return nil, errors.Wrapf(err, "error unmarshaling Schema2List %q", string(manifest))
+		return nil, errors.Wrapf(err, "unmarshaling Schema2List %q", string(manifest))
 	}
 	return &list, nil
 }

@@ -195,7 +195,7 @@ func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error)
 }
 
 // AddDummyV2S1Signature adds an JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
-// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+// This is useful to make the manifest acceptable to a docker/distribution registry (even though nothing needs or wants the JWS signature).
 func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
 	key, err := libtrust.GenerateECP256PrivateKey()
 	if err != nil {

@@ -127,7 +127,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 		}
 		mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
 		if err != nil {
-			return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
+			return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest)
 		}
 		if info.CryptoOperation == types.Encrypt {
 			encMediaType, err := getEncryptedMediaType(mimeType)

@@ -75,7 +75,7 @@ func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
 func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
 	wantedPlatforms, err := platform.WantedPlatforms(ctx)
 	if err != nil {
-		return "", errors.Wrapf(err, "error getting platform information %#v", ctx)
+		return "", errors.Wrapf(err, "getting platform information %#v", ctx)
 	}
 	for _, wantedPlatform := range wantedPlatforms {
 		for _, d := range index.Manifests {

@@ -108,7 +108,7 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest,
 func (index *OCI1Index) Serialize() ([]byte, error) {
 	buf, err := json.Marshal(index)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error marshaling OCI1Index %#v", index)
+		return nil, errors.Wrapf(err, "marshaling OCI1Index %#v", index)
 	}
 	return buf, nil
 }

@@ -200,7 +200,7 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
 		},
 	}
 	if err := json.Unmarshal(manifest, &index); err != nil {
-		return nil, errors.Wrapf(err, "error unmarshaling OCI1Index %q", string(manifest))
+		return nil, errors.Wrapf(err, "unmarshaling OCI1Index %q", string(manifest))
 	}
 	return &index, nil
 }

@@ -22,12 +22,12 @@ type ociArchiveImageDestination struct {
 func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) {
 	tempDirRef, err := createOCIRef(sys, ref.image)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating oci reference")
+		return nil, errors.Wrapf(err, "creating oci reference")
 	}
 	unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys)
 	if err != nil {
 		if err := tempDirRef.deleteTempDir(); err != nil {
-			return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+			return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
 		}
 		return nil, err
 	}

@@ -129,10 +129,13 @@ func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatur
 }
 
 // Commit marks the process of storing the image as successful and asks for the image to be persisted
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
 // after the directory is made, it is tarred up into a file and the directory is deleted
 func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
 	if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
-		return errors.Wrapf(err, "error storing image %q", d.ref.image)
+		return errors.Wrapf(err, "storing image %q", d.ref.image)
 	}
 
 	// path of directory to tar up

@@ -147,13 +150,13 @@ func tarDirectory(src, dst string) error {
 	// input is a stream of bytes from the archive of the directory at path
 	input, err := archive.Tar(src, archive.Uncompressed)
 	if err != nil {
-		return errors.Wrapf(err, "error retrieving stream of bytes from %q", src)
+		return errors.Wrapf(err, "retrieving stream of bytes from %q", src)
 	}
 
 	// creates the tar file
 	outFile, err := os.Create(dst)
 	if err != nil {
-		return errors.Wrapf(err, "error creating tar file %q", dst)
+		return errors.Wrapf(err, "creating tar file %q", dst)
 	}
 	defer outFile.Close()
 

@@ -23,13 +23,13 @@ type ociArchiveImageSource struct {
 func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) {
 	tempDirRef, err := createUntarTempDir(sys, ref)
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating temp directory")
+		return nil, errors.Wrap(err, "creating temp directory")
 	}
 
 	unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
 	if err != nil {
 		if err := tempDirRef.deleteTempDir(); err != nil {
-			return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+			return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
 		}
 		return nil, err
 	}

@@ -52,7 +52,7 @@ func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.Im
 	}
 	tempDirRef, err := createUntarTempDir(sys, ociArchRef)
 	if err != nil {
-		return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory")
+		return imgspecv1.Descriptor{}, errors.Wrap(err, "creating temp directory")
 	}
 	defer func() {
 		err := tempDirRef.deleteTempDir()

@@ -61,7 +61,7 @@ func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.Im
 
 	descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
 	if err != nil {
-		return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index")
+		return imgspecv1.Descriptor{}, errors.Wrap(err, "loading index")
 	}
 	return descriptor, nil
 }

@@ -163,7 +163,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
 func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
 	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
 	if err != nil {
-		return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
+		return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory")
 	}
 	ociRef, err := ocilayout.NewReference(dir, image)
 	if err != nil {

@@ -178,7 +178,7 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error)
 func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
 	tempDirRef, err := createOCIRef(sys, ref.image)
 	if err != nil {
-		return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
+		return tempDirOCIRef{}, errors.Wrap(err, "creating oci reference")
 	}
 	src := ref.resolvedFile
 	dst := tempDirRef.tempDirectory

@@ -190,9 +190,9 @@ func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (temp
 	defer arch.Close()
 	if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
 		if err := tempDirRef.deleteTempDir(); err != nil {
-			return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+			return tempDirOCIRef{}, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
 		}
-		return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", tempDirRef.tempDirectory)
+		return tempDirOCIRef{}, errors.Wrapf(err, "untarring file %q", tempDirRef.tempDirectory)
 	}
 	return tempDirRef, nil
 }

@@ -303,6 +303,9 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]
 }
 
 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
 // WARNING: This does not have any transactional semantics:
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)

@@ -579,7 +579,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
 			continue
 		}
 		if err != nil {
-			errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
+			errlist = append(errlist, errors.Wrapf(err, "loading config file \"%s\"", filename))
 			continue
 		}
 

@@ -164,7 +164,7 @@ type openshiftImageSource struct {
 	// Values specific to this image
 	sys *types.SystemContext
 	// State
-	docker               types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet
+	docker               types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet
 	imageStreamImageName string            // Resolved image identifier, or "" if not known yet
 }
 

@@ -316,7 +316,7 @@ func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error
 
 type openshiftImageDestination struct {
 	client *openshiftClient
-	docker types.ImageDestination // The Docker Registry endpoint
+	docker types.ImageDestination // The docker/distribution API endpoint
 	// State
 	imageStreamImageName string // "" if not yet known
 }

@@ -435,14 +435,14 @@ func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, i
 }
 
 func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
-	var imageStreamName string
+	var imageStreamImageName string
 	if instanceDigest == nil {
 		if d.imageStreamImageName == "" {
 			return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
 		}
-		imageStreamName = d.imageStreamImageName
+		imageStreamImageName = d.imageStreamImageName
 	} else {
-		imageStreamName = instanceDigest.String()
+		imageStreamImageName = instanceDigest.String()
 	}
 
 	// Because image signatures are a shared resource in Atomic Registry, the default upload

@@ -452,7 +452,7 @@ func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signature
 		return nil // No need to even read the old state.
 	}
 
-	image, err := d.client.getImage(ctx, imageStreamName)
+	image, err := d.client.getImage(ctx, imageStreamImageName)
 	if err != nil {
 		return err
 	}

@@ -475,9 +475,9 @@ sigExists:
 		randBytes := make([]byte, 16)
 		n, err := rand.Read(randBytes)
 		if err != nil || n != 16 {
-			return errors.Wrapf(err, "Error generating random signature len %d", n)
+			return errors.Wrapf(err, "generating random signature len %d", n)
 		}
-		signatureName = fmt.Sprintf("%s@%032x", imageStreamName, randBytes)
+		signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
 		if _, ok := existingSigNames[signatureName]; !ok {
 			break
 		}

@@ -506,6 +506,9 @@ sigExists:
 }
 
 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
 // WARNING: This does not have any transactional semantics:
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
@ -9,6 +9,7 @@ import (
|
||||||
|
|
||||||
"github.com/containers/image/v5/pkg/compression/internal"
|
"github.com/containers/image/v5/pkg/compression/internal"
|
||||||
"github.com/containers/image/v5/pkg/compression/types"
|
"github.com/containers/image/v5/pkg/compression/types"
|
||||||
|
"github.com/containers/storage/pkg/chunked"
|
||||||
"github.com/klauspost/pgzip"
|
"github.com/klauspost/pgzip"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
@ -20,19 +21,22 @@ type Algorithm = types.Algorithm
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Gzip compression.
|
// Gzip compression.
|
||||||
Gzip = internal.NewAlgorithm("gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
|
Gzip = internal.NewAlgorithm("gzip", "gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
|
||||||
// Bzip2 compression.
|
// Bzip2 compression.
|
||||||
Bzip2 = internal.NewAlgorithm("bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
|
Bzip2 = internal.NewAlgorithm("bzip2", "bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
|
||||||
// Xz compression.
|
// Xz compression.
|
||||||
Xz = internal.NewAlgorithm("Xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
|
Xz = internal.NewAlgorithm("Xz", "xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
|
||||||
// Zstd compression.
|
// Zstd compression.
|
||||||
Zstd = internal.NewAlgorithm("zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
|
Zstd = internal.NewAlgorithm("zstd", "zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
|
||||||
|
// Zstd:chunked compression.
|
||||||
|
ZstdChunked = internal.NewAlgorithm("zstd:chunked", "zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, chunked.ZstdCompressor)
|
||||||
|
|
||||||
compressionAlgorithms = map[string]Algorithm{
|
compressionAlgorithms = map[string]Algorithm{
|
||||||
Gzip.Name(): Gzip,
|
Gzip.Name(): Gzip,
|
||||||
Bzip2.Name(): Bzip2,
|
Bzip2.Name(): Bzip2,
|
||||||
Xz.Name(): Xz,
|
Xz.Name(): Xz,
|
||||||
Zstd.Name(): Zstd,
|
Zstd.Name(): Zstd,
|
||||||
|
ZstdChunked.Name(): ZstdChunked,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -69,7 +73,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
|
// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
|
||||||
func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
|
func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
|
||||||
if level != nil {
|
if level != nil {
|
||||||
return pgzip.NewWriterLevel(r, *level)
|
return pgzip.NewWriterLevel(r, *level)
|
||||||
}
|
}
|
||||||
|
@@ -77,18 +81,25 @@ func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
 }
 
 // bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
-func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) {
+func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
 	return nil, fmt.Errorf("bzip2 compression not supported")
 }
 
 // xzCompressor is a CompressorFunc for the xz compression algorithm.
-func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
 	return xz.NewWriter(r)
 }
 
 // CompressStream returns the compressor by its name
 func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
-	return internal.AlgorithmCompressor(algo)(dest, level)
+	m := map[string]string{}
+	return internal.AlgorithmCompressor(algo)(dest, m, level)
+}
+
+// CompressStreamWithMetadata returns the compressor by its name. If the compression
+// generates any metadata, it is written to the provided metadata map.
+func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
+	return internal.AlgorithmCompressor(algo)(dest, metadata, level)
 }
 
 // DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid
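Editor's note: a minimal caller sketch for the new entry point (assumed usage, not part of this diff). The caller owns the metadata map and shares it with the compressor, so any annotations the compressor records become visible after Close():

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	var buf bytes.Buffer
	annotations := map[string]string{} // filled in by compressors that emit metadata
	w, err := compression.CompressStreamWithMetadata(&buf, annotations, compression.Gzip, nil)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, strings.NewReader("layer data")); err != nil {
		panic(err)
	}
	w.Close() // metadata, if any, is complete only after Close()
	fmt.Println(buf.Len(), annotations)
}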
@@ -135,13 +146,13 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
 func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
 	decompressor, stream, err := DetectCompression(stream)
 	if err != nil {
-		return nil, false, errors.Wrapf(err, "Error detecting compression")
+		return nil, false, errors.Wrapf(err, "detecting compression")
 	}
 	var res io.ReadCloser
 	if decompressor != nil {
 		res, err = decompressor(stream)
 		if err != nil {
-			return nil, false, errors.Wrapf(err, "Error initializing decompression")
+			return nil, false, errors.Wrapf(err, "initializing decompression")
 		}
 	} else {
 		res = ioutil.NopCloser(stream)
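Editor's note: for context, AutoDecompress wraps any reader and transparently undoes whatever compression DetectCompression recognizes. A usage sketch (the input file name is hypothetical):

package main

import (
	"io"
	"io/ioutil"
	"os"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	rc, wasCompressed, err := compression.AutoDecompress(f)
	if err != nil {
		panic(err)
	}
	defer rc.Close() // must be closed even if the input was not compressed
	_ = wasCompressed
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		panic(err)
	}
}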
@@ -4,7 +4,7 @@ import "io"
 
 // CompressorFunc writes the compressed stream to the given writer using the specified compression level.
 // The caller must call Close() on the stream (even if the input stream does not need closing!).
-type CompressorFunc func(io.Writer, *int) (io.WriteCloser, error)
+type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error)
 
 // DecompressorFunc returns the decompressed stream, given a compressed stream.
 // The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
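Editor's note: every compressor now receives the shared metadata map as its second argument. The shape of an implementation under the new contract (an illustrative sketch; the annotation key is made up):

package main

import "io"

type nopWriteCloser struct{ io.Writer }

func (nopWriteCloser) Close() error { return nil }

// nopCompressor matches the new CompressorFunc signature: it may record
// annotations into metadata for the caller to read back after Close().
func nopCompressor(w io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
	metadata["example.annotation"] = "value" // hypothetical key, for illustration only
	return nopWriteCloser{w}, nil
}

func main() {}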
@@ -13,6 +13,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
 // Algorithm is a compression algorithm that can be used for CompressStream.
 type Algorithm struct {
 	name         string
+	mime         string
 	prefix       []byte
 	decompressor DecompressorFunc
 	compressor   CompressorFunc
@@ -21,9 +22,10 @@ type Algorithm struct {
 // NewAlgorithm creates an Algorithm instance.
 // This function exists so that Algorithm instances can only be created by code that
 // is allowed to import this internal subpackage.
-func NewAlgorithm(name string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
 	return Algorithm{
 		name:         name,
+		mime:         mime,
 		prefix:       prefix,
 		decompressor: decompressor,
 		compressor:   compressor,
@@ -35,6 +37,12 @@ func (c Algorithm) Name() string {
 	return c.name
 }
 
+// InternalUnstableUndocumentedMIMEQuestionMark ???
+// DO NOT USE THIS anywhere outside of c/image until it is properly documented.
+func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
+	return c.mime
+}
+
 // AlgorithmCompressor returns the compressor field of algo.
 // This is a function instead of a public method so that it is only callable by code
 // that is allowed to import this internal subpackage.
@@ -40,13 +40,13 @@ func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
 	return zstd.NewWriter(dest)
 }
 
-func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) {
+func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
 	el := zstd.EncoderLevelFromZstd(level)
 	return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
 }
 
 // zstdCompressor is a CompressorFunc for the zstd compression algorithm.
-func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
 	if level == nil {
 		return zstdWriter(r)
 	}
@@ -11,6 +11,7 @@ import (
 	"runtime"
 	"strings"
 
+	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/homedir"
@@ -51,12 +52,19 @@ var (
 	ErrNotSupported = errors.New("not supported")
 )
 
-// SetCredentials stores the username and password in the credential helper or file
-// and returns path to file or helper name in format (helper:%s).
+// SetCredentials stores the username and password in a location
+// appropriate for sys and the users’ configuration.
+// A valid key can be either a registry hostname or additionally a namespace if
+// the AuthenticationFileHelper is being used.
 // Returns a human-readable description of the location that was updated.
 // NOTE: The return value is only intended to be read by humans; its form is not an API,
 // it may change (or new forms can be added) any time.
-func SetCredentials(sys *types.SystemContext, registry, username, password string) (string, error) {
+func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
+	isNamespaced, err := validateKey(key)
+	if err != nil {
+		return "", err
+	}
+
 	helpers, err := sysregistriesv2.CredentialHelpers(sys)
 	if err != nil {
 		return "", err
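Editor's note: with the file-based AuthenticationFileHelper, the key may now name a namespace below a registry. A hypothetical caller (names and values are illustrative only):

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{}
	// "quay.io/myorg" is a namespaced key; it is rejected by external
	// credential helpers but accepted by the auth-file helper.
	desc, err := config.SetCredentials(sys, "quay.io/myorg", "user", "secret")
	if err != nil {
		panic(err)
	}
	fmt.Println("credentials stored in:", desc)
}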
@@ -71,33 +79,45 @@ func SetCredentials(sys *types.SystemContext, registry, username, password string) (string, error) {
 		// Special-case the built-in helpers for auth files.
 		case sysregistriesv2.AuthenticationFileHelper:
 			desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
-				if ch, exists := auths.CredHelpers[registry]; exists {
-					return false, setAuthToCredHelper(ch, registry, username, password)
+				if ch, exists := auths.CredHelpers[key]; exists {
+					if isNamespaced {
+						return false, unsupportedNamespaceErr(ch)
+					}
+					return false, setAuthToCredHelper(ch, key, username, password)
 				}
 				creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
 				newCreds := dockerAuthConfig{Auth: creds}
-				auths.AuthConfigs[registry] = newCreds
+				auths.AuthConfigs[key] = newCreds
 				return true, nil
 			})
 		// External helpers.
 		default:
-			desc = fmt.Sprintf("credential helper: %s", helper)
-			err = setAuthToCredHelper(helper, registry, username, password)
+			if isNamespaced {
+				err = unsupportedNamespaceErr(helper)
+			} else {
+				desc = fmt.Sprintf("credential helper: %s", helper)
+				err = setAuthToCredHelper(helper, key, username, password)
+			}
 		}
 		if err != nil {
 			multiErr = multierror.Append(multiErr, err)
-			logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", registry, helper, err)
+			logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
 			continue
 		}
-		logrus.Debugf("Stored credentials for %s in credential helper %s", registry, helper)
+		logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
 		return desc, nil
 	}
 	return "", multiErr
 }
 
+func unsupportedNamespaceErr(helper string) error {
+	return errors.Errorf("namespaced key is not supported for credential helper %s", helper)
+}
+
 // SetAuthentication stores the username and password in the credential helper or file
-func SetAuthentication(sys *types.SystemContext, registry, username, password string) error {
-	_, err := SetCredentials(sys, registry, username, password)
+// See the documentation of SetCredentials for format of "key"
+func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
+	_, err := SetCredentials(sys, key, username, password)
 	return err
 }
 
@@ -125,7 +145,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
 			// readJSONFile returns an empty map in case the path doesn't exist.
 			auths, err := readJSONFile(path.path, path.legacyFormat)
 			if err != nil {
-				return nil, errors.Wrapf(err, "error reading JSON file %q", path.path)
+				return nil, errors.Wrapf(err, "reading JSON file %q", path.path)
 			}
 			// Credential helpers in the auth file have a
 			// direct mapping to a registry, so we can just
@@ -215,13 +235,34 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
 // helpers with falling back to using either auth.json
 // file or .docker/config.json, including support for OAuth2 and IdentityToken.
 // If an entry is not found, an empty struct is returned.
+//
+// Deprecated: GetCredentialsForRef should be used in favor of this API
+// because it allows different credentials for different repositories on the
+// same registry.
 func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
-	return getCredentialsWithHomeDir(sys, registry, homedir.Get())
+	return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get())
 }
 
-// getCredentialsWithHomeDir is an internal implementation detail of GetCredentials,
-// it exists only to allow testing it with an artificial home directory.
-func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) {
+// GetCredentialsForRef returns the registry credentials necessary for
+// accessing ref on the registry ref points to,
+// appropriate for sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) {
+	return getCredentialsWithHomeDir(sys, ref, reference.Domain(ref), homedir.Get())
+}
+
+// getCredentialsWithHomeDir is an internal implementation detail of
+// GetCredentialsForRef and GetCredentials. It exists only to allow testing it
+// with an artificial home directory.
+func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, registry, homeDir string) (types.DockerAuthConfig, error) {
+	// consistency check of the ref and registry arguments
+	if ref != nil && reference.Domain(ref) != registry {
+		return types.DockerAuthConfig{}, errors.Errorf(
+			"internal error: provided reference domain %q name does not match registry %q",
+			reference.Domain(ref), registry,
+		)
+	}
+
 	if sys != nil && sys.DockerAuthConfig != nil {
 		logrus.Debugf("Returning credentials for %s from DockerAuthConfig", registry)
 		return *sys.DockerAuthConfig, nil
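Editor's note: a caller sketch for the new per-repository lookup (illustrative only; the image name is made up):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("quay.io/myorg/myimage:latest")
	if err != nil {
		panic(err)
	}
	// Unlike GetCredentials, this can match per-namespace auth-file entries
	// such as "quay.io/myorg" before falling back to the registry "quay.io".
	auth, err := config.GetCredentialsForRef(&types.SystemContext{}, ref)
	if err != nil {
		panic(err)
	}
	fmt.Println("found credentials:", auth.Username != "")
}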
@@ -230,7 +271,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) {
 	// Anonymous function to query credentials from auth files.
 	getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, error) {
 		for _, path := range getAuthFilePaths(sys, homeDir) {
-			authConfig, err := findAuthentication(registry, path.path, path.legacyFormat)
+			authConfig, err := findAuthentication(ref, registry, path.path, path.legacyFormat)
 			if err != nil {
 				return types.DockerAuthConfig{}, err
 			}
@@ -284,7 +325,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) {
 // .docker/config.json
 //
 // Deprecated: This API only has support for username and password. To get the
-// support for oauth2 in docker registry authentication, we added the new
+// support for oauth2 in container registry authentication, we added the new
 // GetCredentials API. The new API should be used and this API is kept to
 // maintain backward compatibility.
 func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
@@ -294,7 +335,7 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
 // getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
 // it exists only to allow testing it with an artificial home directory.
 func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) {
-	auth, err := getCredentialsWithHomeDir(sys, registry, homeDir)
+	auth, err := getCredentialsWithHomeDir(sys, nil, registry, homeDir)
 	if err != nil {
 		return "", "", err
 	}
@@ -304,9 +345,16 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) {
 	return auth.Username, auth.Password, nil
 }
 
-// RemoveAuthentication removes credentials for `registry` from all possible
+// RemoveAuthentication removes credentials for `key` from all possible
 // sources such as credential helpers and auth files.
-func RemoveAuthentication(sys *types.SystemContext, registry string) error {
+// A valid key can be either a registry hostname or additionally a namespace if
+// the AuthenticationFileHelper is being used.
+func RemoveAuthentication(sys *types.SystemContext, key string) error {
+	isNamespaced, err := validateKey(key)
+	if err != nil {
+		return err
+	}
+
 	helpers, err := sysregistriesv2.CredentialHelpers(sys)
 	if err != nil {
 		return err
@@ -316,17 +364,22 @@ func RemoveAuthentication(sys *types.SystemContext, registry string) error {
 	isLoggedIn := false
 
 	removeFromCredHelper := func(helper string) {
-		err := deleteAuthFromCredHelper(helper, registry)
-		if err == nil {
-			logrus.Debugf("Credentials for %q were deleted from credential helper %s", registry, helper)
-			isLoggedIn = true
-			return
-		}
-		if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
-			logrus.Debugf("Not logged in to %s with credential helper %s", registry, helper)
+		if isNamespaced {
+			logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
 			return
+		} else {
+			err := deleteAuthFromCredHelper(helper, key)
+			if err == nil {
+				logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
+				isLoggedIn = true
+				return
+			}
+			if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
+				logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
+				return
+			}
+			multiErr = multierror.Append(multiErr, errors.Wrapf(err, "removing credentials for %s from credential helper %s", key, helper))
 		}
-		multiErr = multierror.Append(multiErr, errors.Wrapf(err, "error removing credentials for %s from credential helper %s", registry, helper))
 	}
 
 	for _, helper := range helpers {
@@ -335,15 +388,12 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
 		// Special-case the built-in helper for auth files.
 		case sysregistriesv2.AuthenticationFileHelper:
 			_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
-				if innerHelper, exists := auths.CredHelpers[registry]; exists {
+				if innerHelper, exists := auths.CredHelpers[key]; exists {
 					removeFromCredHelper(innerHelper)
 				}
-				if _, ok := auths.AuthConfigs[registry]; ok {
+				if _, ok := auths.AuthConfigs[key]; ok {
 					isLoggedIn = true
-					delete(auths.AuthConfigs, registry)
-				} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
-					isLoggedIn = true
-					delete(auths.AuthConfigs, normalizeRegistry(registry))
+					delete(auths.AuthConfigs, key)
 				}
 				return true, multiErr
 			})
@@ -486,13 +536,13 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
 
 	if legacyFormat {
 		if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
-			return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+			return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
 		}
 		return auths, nil
 	}
 
 	if err = json.Unmarshal(raw, &auths); err != nil {
-		return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+		return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
 	}
 
 	if auths.AuthConfigs == nil {
@@ -524,21 +574,21 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) (string, error) {
 
 	auths, err := readJSONFile(path, false)
 	if err != nil {
-		return "", errors.Wrapf(err, "error reading JSON file %q", path)
+		return "", errors.Wrapf(err, "reading JSON file %q", path)
 	}
 
 	updated, err := editor(&auths)
 	if err != nil {
-		return "", errors.Wrapf(err, "error updating %q", path)
+		return "", errors.Wrapf(err, "updating %q", path)
 	}
 	if updated {
 		newData, err := json.MarshalIndent(auths, "", "\t")
 		if err != nil {
-			return "", errors.Wrapf(err, "error marshaling JSON %q", path)
+			return "", errors.Wrapf(err, "marshaling JSON %q", path)
 		}
 
 		if err = ioutil.WriteFile(path, newData, 0600); err != nil {
-			return "", errors.Wrapf(err, "error writing to file %q", path)
+			return "", errors.Wrapf(err, "writing to file %q", path)
 		}
 	}
 
@@ -575,11 +625,13 @@ func deleteAuthFromCredHelper(credHelper, registry string) error {
 	return helperclient.Erase(p, registry)
 }
 
-// findAuthentication looks for auth of registry in path
-func findAuthentication(registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
+// findAuthentication looks for auth of registry in path. If ref is
+// not nil, then it will be taken into account when looking up the
+// authentication credentials.
+func findAuthentication(ref reference.Named, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
 	auths, err := readJSONFile(path, legacyFormat)
 	if err != nil {
-		return types.DockerAuthConfig{}, errors.Wrapf(err, "error reading JSON file %q", path)
+		return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path)
 	}
 
 	// First try cred helpers. They should always be normalized.
@@ -587,16 +639,36 @@ func findAuthentication(registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
 		return getAuthFromCredHelper(ch, registry)
 	}
 
-	// I'm feeling lucky
-	if val, exists := auths.AuthConfigs[registry]; exists {
-		return decodeDockerAuth(val)
+	// Support for different paths in auth.
+	// (This is not a feature of ~/.docker/config.json; we support it even for
+	// those files as an extension.)
+	var keys []string
+	if !legacyFormat && ref != nil {
+		keys = authKeysForRef(ref)
+	} else {
+		keys = []string{registry}
+	}
+
+	// Repo or namespace keys are only supported as exact matches. For registry
+	// keys we prefer exact matches as well.
+	for _, key := range keys {
+		if val, exists := auths.AuthConfigs[key]; exists {
+			return decodeDockerAuth(val)
+		}
 	}
 
 	// bad luck; let's normalize the entries first
-	registry = normalizeRegistry(registry)
+	// This primarily happens for legacyFormat, which for a time used API URLs
+	// (http[s:]//…/v1/) as keys.
+	// Secondarily, (docker login) accepted URLs with no normalization for
+	// several years, and matched registry hostnames against that, so support
+	// those entries even in non-legacyFormat ~/.docker/config.json.
+	// The docker.io registry still uses the /v1/ key with a special host name,
+	// so account for that as well.
+	registry = normalizeAuthFileKey(registry, legacyFormat)
 	normalizedAuths := map[string]dockerAuthConfig{}
 	for k, v := range auths.AuthConfigs {
-		normalizedAuths[normalizeRegistry(k)] = v
+		normalizedAuths[normalizeAuthFileKey(k, legacyFormat)] = v
 	}
 
 	if val, exists := normalizedAuths[registry]; exists {
@@ -606,6 +678,28 @@ func findAuthentication(ref reference.Named, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
 	return types.DockerAuthConfig{}, nil
 }
 
+// authKeysForRef returns the valid paths for a provided reference. For example,
+// when given a reference "quay.io/repo/ns/image:tag", then it would return
+// - quay.io/repo/ns/image
+// - quay.io/repo/ns
+// - quay.io/repo
+// - quay.io
+func authKeysForRef(ref reference.Named) (res []string) {
+	name := ref.Name()
+
+	for {
+		res = append(res, name)
+
+		lastSlash := strings.LastIndex(name, "/")
+		if lastSlash == -1 {
+			break
+		}
+		name = name[:lastSlash]
+	}
+
+	return res
+}
+
 // decodeDockerAuth decodes the username and password, which is
 // encoded in base64.
 func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
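Editor's note: the expansion order matters because the most specific repository key wins. A standalone re-implementation of the loop, for illustration only (the real helper above is unexported):

package main

import (
	"fmt"
	"strings"
)

func keysFor(name string) []string {
	var res []string
	for {
		res = append(res, name)
		i := strings.LastIndex(name, "/")
		if i == -1 {
			break
		}
		name = name[:i] // drop one path component per iteration
	}
	return res
}

func main() {
	fmt.Println(keysFor("quay.io/repo/ns/image"))
	// [quay.io/repo/ns/image quay.io/repo/ns quay.io/repo quay.io]
}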
@@ -629,27 +723,36 @@ func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
 	}, nil
 }
 
-// convertToHostname converts a registry url which has http|https prepended
-// to just an hostname.
-// Copied from github.com/docker/docker/registry/auth.go
-func convertToHostname(url string) string {
-	stripped := url
-	if strings.HasPrefix(url, "http://") {
-		stripped = strings.TrimPrefix(url, "http://")
-	} else if strings.HasPrefix(url, "https://") {
-		stripped = strings.TrimPrefix(url, "https://")
-	}
-
-	nameParts := strings.SplitN(stripped, "/", 2)
-
-	return nameParts[0]
+// normalizeAuthFileKey takes a key, converts it to a host name and normalizes
+// the resulting registry.
+func normalizeAuthFileKey(key string, legacyFormat bool) string {
+	stripped := strings.TrimPrefix(key, "http://")
+	stripped = strings.TrimPrefix(stripped, "https://")
+
+	if legacyFormat || stripped != key {
+		stripped = strings.SplitN(stripped, "/", 2)[0]
+	}
+
+	return normalizeRegistry(stripped)
 }
 
+// normalizeRegistry converts the provided registry if a known docker.io host
+// is provided.
 func normalizeRegistry(registry string) string {
-	normalized := convertToHostname(registry)
-	switch normalized {
+	switch registry {
 	case "registry-1.docker.io", "docker.io":
 		return "index.docker.io"
 	}
-	return normalized
+	return registry
+}
+
+// validateKey verifies that the input key does not have a prefix that is not
+// allowed and returns an indicator if the key is namespaced.
+func validateKey(key string) (isNamespaced bool, err error) {
+	if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
+		return isNamespaced, errors.Errorf("key %s contains http[s]:// prefix", key)
+	}
+
+	// check if the provided key contains one or more subpaths.
+	return strings.ContainsRune(key, '/'), nil
 }
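Editor's note: URL-shaped keys are still reduced to a host name, but in non-legacy files a scheme-less namespaced key now passes through intact. A standalone re-implementation for illustration (the real functions are unexported; behavior inferred from the code above):

package main

import (
	"fmt"
	"strings"
)

func normalizeAuthFileKey(key string, legacyFormat bool) string {
	stripped := strings.TrimPrefix(key, "http://")
	stripped = strings.TrimPrefix(stripped, "https://")
	if legacyFormat || stripped != key {
		stripped = strings.SplitN(stripped, "/", 2)[0] // URL keys keep only the host
	}
	switch stripped { // inlined normalizeRegistry
	case "registry-1.docker.io", "docker.io":
		return "index.docker.io"
	}
	return stripped
}

func main() {
	fmt.Println(normalizeAuthFileKey("https://index.docker.io/v1/", true)) // index.docker.io
	fmt.Println(normalizeAuthFileKey("quay.io/ns/repo", false))            // quay.io/ns/repo
	fmt.Println(normalizeAuthFileKey("docker.io", false))                  // index.docker.io
}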
@@ -73,7 +73,7 @@ func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused
 		if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
 			err := keyctl.Unlink(userkeyring, k)
 			if err != nil {
-				return errors.Wrapf(err, "error unlinking key %d", k.ID())
+				return errors.Wrapf(err, "unlinking key %d", k.ID())
 			}
 			logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr)
 		}
@@ -100,16 +100,16 @@ func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused
 	// link the key to userKeyring
 	userKeyring, err := keyctl.UserKeyring()
 	if err != nil {
-		return errors.Wrapf(err, "error getting user keyring")
+		return errors.Wrapf(err, "getting user keyring")
 	}
 	err = keyctl.Link(userKeyring, id)
 	if err != nil {
-		return errors.Wrapf(err, "error linking the key to user keyring")
+		return errors.Wrapf(err, "linking the key to user keyring")
 	}
 	// unlink the key from session keyring
 	err = keyctl.Unlink(keyring, id)
 	if err != nil {
-		return errors.Wrapf(err, "error unlinking the key from session keyring")
+		return errors.Wrapf(err, "unlinking the key from session keyring")
 	}
 	return nil
 }
@@ -211,7 +211,7 @@ func (c *PullCandidate) Record() error {
 	value := reference.TrimNamed(c.Value)
 
 	if err := Add(c.resolved.systemContext, name.String(), value); err != nil {
-		return errors.Wrapf(err, "error recording short-name alias (%q=%q)", c.resolved.userInput, c.Value)
+		return errors.Wrapf(err, "recording short-name alias (%q=%q)", c.resolved.userInput, c.Value)
 	}
 	return nil
 }
@@ -323,7 +323,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
 	for _, reg := range unqualifiedSearchRegistries {
 		named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
 		if err != nil {
-			return nil, errors.Wrapf(err, "error creating reference with unqualified-search registry %q", reg)
+			return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg)
 		}
 		// Make sure to add ":latest" if needed
 		named = reference.TagNameOnly(named)
@@ -450,7 +450,7 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, error) {
 	for _, reg := range append([]string{"localhost"}, unqualifiedSearchRegistries...) {
 		named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
 		if err != nil {
-			return nil, errors.Wrapf(err, "error creating reference with unqualified-search registry %q", reg)
+			return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg)
 		}
 		// Make sure to add ":latest" if needed
 		named = reference.TagNameOnly(named)
@@ -3,6 +3,7 @@ package sysregistriesv2
 import (
 	"os"
 	"path/filepath"
+	"reflect"
 	"strings"
 
 	"github.com/BurntSushi/toml"
@@ -49,6 +50,17 @@ type shortNameAliasConf struct {
 	// reference counter parts.
 	// Note that Aliases is niled after being loaded from a file.
 	Aliases map[string]string `toml:"aliases"`
+
+	// If you add any field, make sure to update nonempty() below.
+}
+
+// nonempty returns true if config contains at least one configuration entry.
+func (c *shortNameAliasConf) nonempty() bool {
+	copy := *c // A shallow copy
+	if copy.Aliases != nil && len(copy.Aliases) == 0 {
+		copy.Aliases = nil
+	}
+	return !reflect.DeepEqual(copy, shortNameAliasConf{})
 }
 
 // alias combines the parsed value of an alias with the config file it has been
@@ -197,7 +209,7 @@ func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
 func parseShortNameValue(alias string) (reference.Named, error) {
 	ref, err := reference.Parse(alias)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error parsing alias %q", alias)
+		return nil, errors.Wrapf(err, "parsing alias %q", alias)
 	}
 
 	if _, ok := ref.(reference.Digested); ok {
@@ -306,14 +318,14 @@ func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
 	_, err := toml.DecodeFile(confPath, &conf)
 	if err != nil && !os.IsNotExist(err) {
 		// It's okay if the config doesn't exist. Other errors are not.
-		return nil, nil, errors.Wrapf(err, "error loading short-name aliases config file %q", confPath)
+		return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
 	}
 
 	// Even if we don’t always need the cache, doing so validates the machine-generated config. The
 	// file could still be corrupted by another process or user.
 	cache, err := newShortNameAliasCache(confPath, &conf)
 	if err != nil {
-		return nil, nil, errors.Wrapf(err, "error loading short-name aliases config file %q", confPath)
+		return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
 	}
 
 	return &conf, cache, nil
43 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go (generated, vendored)
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"reflect"
 	"regexp"
 	"sort"
 	"strings"
@@ -87,7 +88,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
 	newNamedRef = e.Location + refString[prefixLen:]
 	newParsedRef, err := reference.ParseNamed(newNamedRef)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error rewriting reference")
+		return nil, errors.Wrapf(err, "rewriting reference")
 	}
 
 	return newParsedRef, nil
@@ -172,9 +173,17 @@ type V1RegistriesConf struct {
 
 // Nonempty returns true if config contains at least one configuration entry.
 func (config *V1RegistriesConf) Nonempty() bool {
-	return (len(config.V1TOMLConfig.Search.Registries) != 0 ||
-		len(config.V1TOMLConfig.Insecure.Registries) != 0 ||
-		len(config.V1TOMLConfig.Block.Registries) != 0)
+	copy := *config // A shallow copy
+	if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 {
+		copy.V1TOMLConfig.Search.Registries = nil
+	}
+	if copy.V1TOMLConfig.Insecure.Registries != nil && len(copy.V1TOMLConfig.Insecure.Registries) == 0 {
+		copy.V1TOMLConfig.Insecure.Registries = nil
+	}
+	if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 {
+		copy.V1TOMLConfig.Block.Registries = nil
+	}
+	return !reflect.DeepEqual(copy, V1RegistriesConf{})
 }
 
 // V2RegistriesConf is the sysregistries v2 configuration format.
@@ -203,12 +212,26 @@ type V2RegistriesConf struct {
 	ShortNameMode string `toml:"short-name-mode"`
 
 	shortNameAliasConf
+
+	// If you add any field, make sure to update Nonempty() below.
 }
 
 // Nonempty returns true if config contains at least one configuration entry.
 func (config *V2RegistriesConf) Nonempty() bool {
-	return (len(config.Registries) != 0 ||
-		len(config.UnqualifiedSearchRegistries) != 0)
+	copy := *config // A shallow copy
+	if copy.Registries != nil && len(copy.Registries) == 0 {
+		copy.Registries = nil
+	}
+	if copy.UnqualifiedSearchRegistries != nil && len(copy.UnqualifiedSearchRegistries) == 0 {
+		copy.UnqualifiedSearchRegistries = nil
+	}
+	if copy.CredentialHelpers != nil && len(copy.CredentialHelpers) == 0 {
+		copy.CredentialHelpers = nil
+	}
+	if !copy.shortNameAliasConf.nonempty() {
+		copy.shortNameAliasConf = shortNameAliasConf{}
+	}
+	return !reflect.DeepEqual(copy, V2RegistriesConf{})
 }
 
 // parsedConfig is the result of parsing, and possibly merging, configuration files;
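Editor's note: the switch from hand-written length checks to reflect.DeepEqual needs the nil-ing above because decoders can leave empty-but-non-nil slices, which DeepEqual would count as content. A minimal demonstration (a sketch with a simplified struct, not part of the diff):

package main

import (
	"fmt"
	"reflect"
)

type conf struct{ Registries []string }

func main() {
	decoded := conf{Registries: []string{}}         // what a decoder may produce for an empty list
	fmt.Println(reflect.DeepEqual(decoded, conf{})) // false: []string{} is not nil
	decoded.Registries = nil                        // the normalization Nonempty() performs
	fmt.Println(reflect.DeepEqual(decoded, conf{})) // true
}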
@@ -604,7 +627,7 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
 		if err != nil && !os.IsNotExist(err) {
 			// Ignore IsNotExist errors: most systems won't have a registries.conf.d
 			// directory.
-			return nil, errors.Wrapf(err, "error reading registries.conf.d")
+			return nil, errors.Wrapf(err, "reading registries.conf.d")
 		}
 	}
 
@@ -646,7 +669,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedConfig, error) {
 			return nil, err // Should never happen
 		}
 	} else {
-		return nil, errors.Wrapf(err, "error loading registries configuration %q", wrapper.configPath)
+		return nil, errors.Wrapf(err, "loading registries configuration %q", wrapper.configPath)
 	}
 }
 
@@ -659,7 +682,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedConfig, error) {
 		// Enforce v2 format for drop-in-configs.
 		dropIn, err := loadConfigFile(path, true)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error loading drop-in registries configuration %q", path)
+			return nil, errors.Wrapf(err, "loading drop-in registries configuration %q", path)
 		}
 		config.updateWithConfigurationFrom(dropIn)
 	}
@@ -910,7 +933,7 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
 	// Parse and validate short-name aliases.
 	cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf)
 	if err != nil {
-		return nil, errors.Wrap(err, "error validating short-name aliases")
+		return nil, errors.Wrap(err, "validating short-name aliases")
 	}
 	res.aliasCache = cache
 	// Clear conf.partialV2.shortNameAliasConf to make it available for garbage collection and
@@ -23,7 +23,9 @@ import (
 	"github.com/containers/image/v5/pkg/blobinfocache/none"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage"
+	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chunked"
 	"github.com/containers/storage/pkg/ioutils"
 	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -59,6 +61,7 @@ type storageImageDestination struct {
 	directory      string        // Temporary directory where we store blobs until Commit() time
 	nextTempFileID int32         // A counter that we use for computing filenames to assign to blobs
 	manifest       []byte        // Manifest contents, temporary
+	manifestDigest digest.Digest // Valid if len(manifest) != 0
 	signatures     []byte        // Signature contents, temporary
 	signatureses   map[digest.Digest][]byte // Instance signature contents, temporary
 	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
@@ -76,12 +79,13 @@ type storageImageDestination struct {
 	indexToStorageID map[int]*string
 	// All accesses to below data are protected by `lock` which is made
 	// *explicit* in the code.
 	blobDiffIDs            map[digest.Digest]digest.Digest           // Mapping from layer blobsums to their corresponding DiffIDs
 	fileSizes              map[digest.Digest]int64                   // Mapping from layer blobsums to their sizes
 	filenames              map[digest.Digest]string                  // Mapping from layer blobsums to names of files we used to hold them
 	currentIndex           int                                       // The index of the layer to be committed (i.e., lower indices have already been committed)
 	indexToPulledLayerInfo map[int]*manifest.LayerInfo               // Mapping from layer (by index) to pulled down blob
 	blobAdditionalLayer    map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
+	diffOutputs            map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
 }
 
 type storageImageCloser struct {
@@ -121,7 +125,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
 	}
 	if img.Metadata != "" {
 		if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
-			return nil, errors.Wrap(err, "error decoding metadata for source image")
+			return nil, errors.Wrap(err, "decoding metadata for source image")
 		}
 	}
 	return image, nil
@@ -239,7 +243,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
 		key := manifestBigDataKey(*instanceDigest)
 		blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
 		if err != nil {
-			return nil, "", errors.Wrapf(err, "error reading manifest for image instance %q", *instanceDigest)
+			return nil, "", errors.Wrapf(err, "reading manifest for image instance %q", *instanceDigest)
 		}
 		return blob, manifest.GuessMIMEType(blob), err
 	}
@@ -276,14 +280,14 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
 func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
 	manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID)
+		return nil, errors.Wrapf(err, "reading image manifest for %q", s.image.ID)
 	}
 	if manifest.MIMETypeIsMultiImage(manifestType) {
 		return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)")
 	}
 	man, err := manifest.FromBlob(manifestBlob, manifestType)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID)
+		return nil, errors.Wrapf(err, "parsing image manifest for %q", s.image.ID)
 	}
 
 	uncompressedLayerType := ""
@@ -299,7 +303,7 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
 	for layerID != "" {
 		layer, err := s.imageRef.transport.store.Layer(layerID)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error reading layer %q in image %q", layerID, s.image.ID)
+			return nil, errors.Wrapf(err, "reading layer %q in image %q", layerID, s.image.ID)
 		}
 		if layer.UncompressedDigest == "" {
 			return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID)
@@ -318,7 +322,7 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
 
 	res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID)
+		return nil, errors.Wrapf(err, "creating LayerInfosForCopy of image %q", s.image.ID)
 	}
 	return res, nil
 }
@ -367,13 +371,13 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *
|
||||||
if len(signatureSizes) > 0 {
|
if len(signatureSizes) > 0 {
|
||||||
signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
|
signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "error looking up signatures data for image %q (%s)", s.image.ID, instance)
|
return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s)", s.image.ID, instance)
|
||||||
}
|
}
|
||||||
signature = signatureBlob
|
signature = signatureBlob
|
||||||
}
|
}
|
||||||
for _, length := range signatureSizes {
|
for _, length := range signatureSizes {
|
||||||
if offset+length > len(signature) {
|
if offset+length > len(signature) {
|
||||||
return nil, errors.Wrapf(err, "error looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signature), offset+length)
|
return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signature), offset+length)
|
||||||
}
|
}
|
||||||
sigslice = append(sigslice, signature[offset:offset+length])
|
sigslice = append(sigslice, signature[offset:offset+length])
|
||||||
offset += length
|
offset += length
|
||||||
|
@@ -389,7 +393,7 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
 func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
 	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating a temporary directory")
+		return nil, errors.Wrapf(err, "creating a temporary directory")
 	}
 	image := &storageImageDestination{
 		imageRef: imageRef,

@@ -403,6 +407,7 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
 		SignaturesSizes:        make(map[digest.Digest][]int),
 		indexToStorageID:       make(map[int]*string),
 		indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
+		diffOutputs:            make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
 	}
 	return image, nil
 }
@@ -418,6 +423,11 @@ func (s *storageImageDestination) Close() error {
 	for _, al := range s.blobAdditionalLayer {
 		al.Release()
 	}
+	for _, v := range s.diffOutputs {
+		if v.Target != "" {
+			_ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
+		}
+	}
 	return os.RemoveAll(s.directory)
 }
@@ -483,21 +493,21 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	filename := s.computeNextBlobCacheFile()
 	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
 	if err != nil {
-		return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
+		return errorBlobInfo, errors.Wrapf(err, "creating temporary file %q", filename)
 	}
 	defer file.Close()
 	counter := ioutils.NewWriteCounter(hasher.Hash())
 	reader := io.TeeReader(io.TeeReader(stream, counter), file)
 	decompressed, err := archive.DecompressStream(reader)
 	if err != nil {
-		return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob")
+		return errorBlobInfo, errors.Wrap(err, "setting up to decompress blob")
 	}
 	// Copy the data to the file.
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 	_, err = io.Copy(diffID.Hash(), decompressed)
 	decompressed.Close()
 	if err != nil {
-		return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename)
+		return errorBlobInfo, errors.Wrapf(err, "storing blob to file %q", filename)
 	}
 	// Ensure that any information that we were given about the blob is correct.
 	if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
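The PutBlob path above pipes the incoming stream through two TeeReaders so a single read simultaneously feeds the compressed-digest hasher, the byte counter, the on-disk cache file, and the decompressor that computes the uncompressed diffID. A self-contained sketch of that single-pass fan-out, using gzip and in-memory buffers in place of archive.DecompressStream and the temporary file:

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	// Pretend this is the compressed layer stream arriving from the network.
	var compressed bytes.Buffer
	gz := gzip.NewWriter(&compressed)
	gz.Write([]byte("layer contents"))
	gz.Close()

	var cacheFile bytes.Buffer // stand-in for the temporary blob cache file
	compressedHash := sha256.New()

	// Tee the stream so one pass feeds the hash, the cache file, and the
	// decompressor at the same time.
	reader := io.TeeReader(io.TeeReader(&compressed, compressedHash), &cacheFile)
	decompressed, err := gzip.NewReader(reader)
	if err != nil {
		panic(err)
	}
	uncompressedHash := sha256.New()
	if _, err := io.Copy(uncompressedHash, decompressed); err != nil {
		panic(err)
	}
	fmt.Printf("digest: %x\ndiffID: %x\ncached: %d bytes\n",
		compressedHash.Sum(nil), uncompressedHash.Sum(nil), cacheFile.Len())
}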
@@ -556,7 +566,7 @@ func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context,
 	// Check if we have the layer in the underlying additional layer store.
 	aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String())
 	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
-		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q and labels`, blobinfo.Digest)
+		return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest)
 	} else if err == nil {
 		// Record the uncompressed value so that we can use it to calculate layer IDs.
 		s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
@@ -572,6 +582,61 @@ func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context,
 	return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
 }

+type zstdFetcher struct {
+	stream   internalTypes.ImageSourceSeekable
+	ctx      context.Context
+	blobInfo types.BlobInfo
+}
+
+// GetBlobAt converts from chunked.GetBlobAt to ImageSourceSeekable.GetBlobAt.
+func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	var newChunks []internalTypes.ImageSourceChunk
+	for _, v := range chunks {
+		i := internalTypes.ImageSourceChunk{
+			Offset: v.Offset,
+			Length: v.Length,
+		}
+		newChunks = append(newChunks, i)
+	}
+	rc, errs, err := f.stream.GetBlobAt(f.ctx, f.blobInfo, newChunks)
+	if _, ok := err.(internalTypes.BadPartialRequestError); ok {
+		err = chunked.ErrBadRequest{}
+	}
+	return rc, errs, err
+
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present at the destination storage.  stream is accessed
+// in a non-sequential way to retrieve the missing chunks.
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream internalTypes.ImageSourceSeekable, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
+	fetcher := zstdFetcher{
+		stream:   stream,
+		ctx:      ctx,
+		blobInfo: srcInfo,
+	}
+
+	differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
+	if err != nil {
+		return srcInfo, err
+	}
+
+	out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
+	if err != nil {
+		return srcInfo, err
+	}
+
+	blobDigest := srcInfo.Digest
+
+	s.lock.Lock()
+	s.blobDiffIDs[blobDigest] = blobDigest
+	s.fileSizes[blobDigest] = 0
+	s.filenames[blobDigest] = ""
+	s.diffOutputs[blobDigest] = out
+	s.lock.Unlock()
+
+	return srcInfo, nil
+}
+
 // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
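zstdFetcher above is a small adapter: c/storage's chunked differ wants a GetBlobAt(chunks) callback, while the image source exposes GetBlobAt(ctx, blobInfo, chunks), so the struct carries the extra arguments across the boundary. A reduced, self-contained sketch of the same adapter shape (all types here are invented stand-ins, not the real internalTypes/chunked APIs):

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// srcChunk and seekableSource are hypothetical stand-ins for the two chunk
// signatures being bridged above.
type srcChunk struct{ Offset, Length uint64 }

type seekableSource interface {
	GetBlobAt(ctx context.Context, chunks []srcChunk) (chan io.ReadCloser, chan error, error)
}

// adapter satisfies a differ-side callback by delegating to a seekableSource,
// carrying the context and per-blob state along, like zstdFetcher does.
type adapter struct {
	ctx    context.Context
	stream seekableSource
}

func (a *adapter) GetBlobAt(chunks []srcChunk) (chan io.ReadCloser, chan error, error) {
	return a.stream.GetBlobAt(a.ctx, chunks)
}

// fakeSource serves byte ranges out of an in-memory string.
type fakeSource struct{ data string }

func (f *fakeSource) GetBlobAt(ctx context.Context, chunks []srcChunk) (chan io.ReadCloser, chan error, error) {
	parts := make(chan io.ReadCloser, len(chunks))
	for _, c := range chunks {
		parts <- io.NopCloser(strings.NewReader(f.data[c.Offset : c.Offset+c.Length]))
	}
	close(parts)
	return parts, make(chan error), nil
}

func main() {
	a := &adapter{ctx: context.Background(), stream: &fakeSource{data: "0123456789"}}
	parts, _, _ := a.GetBlobAt([]srcChunk{{Offset: 2, Length: 3}})
	for r := range parts {
		b, _ := io.ReadAll(r)
		fmt.Println(string(b)) // 234
	}
}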
@@ -611,7 +676,7 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob
 	// Check if we have a wasn't-compressed layer in storage that's based on that blob.
 	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
 	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
-		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+		return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, blobinfo.Digest)
 	}
 	if len(layers) > 0 {
 		// Save this for completeness.

@@ -626,7 +691,7 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob
 	// Check if we have a was-compressed layer in storage that's based on that blob.
 	layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
 	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
-		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+		return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q`, blobinfo.Digest)
 	}
 	if len(layers) > 0 {
 		// Record the uncompressed value so that we can use it to calculate layer IDs.

@@ -645,7 +710,7 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob
 	if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
 		layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
 		if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
-			return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
+			return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest)
 		}
 		if len(layers) > 0 {
 			if blobinfo.Size != -1 {

@@ -720,7 +785,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
 	if filename, ok := s.filenames[info.Digest]; ok {
 		contents, err2 := ioutil.ReadFile(filename)
 		if err2 != nil {
-			return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename)
+			return nil, errors.Wrapf(err2, `reading blob from file %q`, filename)
 		}
 		return contents, nil
 	}
@@ -822,7 +887,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	// NOTE: use `TryReusingBlob` to prevent recursion.
 	has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
 	if err != nil {
-		return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
+		return errors.Wrapf(err, "checking for a layer based on blob %q", blob.Digest.String())
 	}
 	if !has {
 		return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
@@ -843,6 +908,27 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 		return nil
 	}

+	s.lock.Lock()
+	diffOutput, ok := s.diffOutputs[blob.Digest]
+	s.lock.Unlock()
+	if ok {
+		layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
+		if err != nil {
+			return err
+		}
+
+		// FIXME: what to do with the uncompressed digest?
+		diffOutput.UncompressedDigest = blob.Digest
+
+		if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
+			_ = s.imageRef.transport.store.Delete(layer.ID)
+			return err
+		}
+
+		s.indexToStorageID[index] = &layer.ID
+		return nil
+	}
+
 	s.lock.Lock()
 	al, ok := s.blobAdditionalLayer[blob.Digest]
 	s.lock.Unlock()
@@ -874,7 +960,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 		}
 	}
 	if layer == "" {
-		return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
+		return errors.Wrapf(err2, "locating layer for blob %q", blob.Digest)
 	}
 	// Read the layer's contents.
 	noCompression := archive.Uncompressed

@@ -883,7 +969,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	}
 	diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
 	if err2 != nil {
-		return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
+		return errors.Wrapf(err2, "reading layer %q for blob %q", layer, blob.Digest)
 	}
 	// Copy the layer diff to a file.  Diff() takes a lock that it holds
 	// until the ReadCloser that it returns is closed, and PutLayer() wants

@@ -893,7 +979,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
 	if err != nil {
 		diff.Close()
-		return errors.Wrapf(err, "error creating temporary file %q", filename)
+		return errors.Wrapf(err, "creating temporary file %q", filename)
 	}
 	// Copy the data to the file.
 	// TODO: This can take quite some time, and should ideally be cancellable using

@@ -902,7 +988,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	diff.Close()
 	file.Close()
 	if err != nil {
-		return errors.Wrapf(err, "error storing blob to file %q", filename)
+		return errors.Wrapf(err, "storing blob to file %q", filename)
 	}
 	// Make sure that we can find this file later, should we need the layer's
 	// contents again.
@@ -913,27 +999,34 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
 	// Read the cached blob and use it as a diff.
 	file, err := os.Open(filename)
 	if err != nil {
-		return errors.Wrapf(err, "error opening file %q", filename)
+		return errors.Wrapf(err, "opening file %q", filename)
 	}
 	defer file.Close()
 	// Build the new layer using the diff, regardless of where it came from.
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 	layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file)
 	if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
-		return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
+		return errors.Wrapf(err, "adding layer with blob %q", blob.Digest)
 	}

 	s.indexToStorageID[index] = &layer.ID
 	return nil
 }

+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
 	if len(s.manifest) == 0 {
 		return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
 	}
 	toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
 	if err != nil {
-		return errors.Wrapf(err, "error retrieving top-level manifest")
+		return errors.Wrapf(err, "retrieving top-level manifest")
 	}
 	// If the name we're saving to includes a digest, then check that the
 	// manifests that we're about to save all either match the one from the
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Find the list of layer blobs.
|
// Find the list of layer blobs.
|
||||||
if len(s.manifest) == 0 {
|
|
||||||
return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
|
|
||||||
}
|
|
||||||
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
|
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "error parsing manifest")
|
return errors.Wrapf(err, "parsing manifest")
|
||||||
}
|
}
|
||||||
layerBlobs := man.LayerInfos()
|
layerBlobs := man.LayerInfos()
|
||||||
|
|
||||||
// Extract, commit, or find the layers.
|
// Extract, commit, or find the layers.
|
||||||
for i, blob := range layerBlobs {
|
for i, blob := range layerBlobs {
|
||||||
if err := s.commitLayer(ctx, blob, i); err != nil {
|
if err := s.commitLayer(ctx, blob, i); err != nil {
|
||||||
|
@@ -996,11 +1087,11 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 	if err != nil {
 		if errors.Cause(err) != storage.ErrDuplicateID {
 			logrus.Debugf("error creating image: %q", err)
-			return errors.Wrapf(err, "error creating image %q", intendedID)
+			return errors.Wrapf(err, "creating image %q", intendedID)
 		}
 		img, err = s.imageRef.transport.store.Image(intendedID)
 		if err != nil {
-			return errors.Wrapf(err, "error reading image %q", intendedID)
+			return errors.Wrapf(err, "reading image %q", intendedID)
 		}
 		if img.TopLayer != lastLayer {
 			logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
@@ -1011,6 +1102,19 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 	} else {
 		logrus.Debugf("created new image ID %q", img.ID)
 	}
+
+	// Clean up the unfinished image on any error.
+	// (Is this the right thing to do if the image has existed before?)
+	commitSucceeded := false
+	defer func() {
+		if !commitSucceeded {
+			logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames)
+			if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil {
+				logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err)
+			}
+		}
+	}()
+
 	// Add the non-layer blobs as data items.  Since we only share layers, they should all be in files, so
 	// we just need to screen out the ones that are actually layers to get the list of non-layers.
 	dataBlobs := make(map[digest.Digest]struct{})
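This hunk replaces the many copy-pasted delete-image-on-error blocks (visible as deletions in the next hunk) with one success flag checked in a deferred cleanup, a common Go idiom for multi-step operations that need rollback. The idiom in isolation, as a self-contained sketch:

package main

import "fmt"

func createResource() error {
	fmt.Println("resource created")
	succeeded := false
	defer func() {
		if !succeeded {
			// Runs on every early return below, so each failure path
			// no longer needs its own copy of the cleanup code.
			fmt.Println("rolling back resource")
		}
	}()

	if err := step("write metadata"); err != nil {
		return err
	}
	if err := step("set names"); err != nil {
		return err
	}

	succeeded = true // reached only if every step passed
	return nil
}

func step(name string) error {
	fmt.Println("step:", name)
	return nil // flip to an error to watch the rollback fire
}

func main() {
	fmt.Println(createResource())
}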
|
@ -1023,90 +1127,62 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
|
||||||
for blob := range dataBlobs {
|
for blob := range dataBlobs {
|
||||||
v, err := ioutil.ReadFile(s.filenames[blob])
|
v, err := ioutil.ReadFile(s.filenames[blob])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "error copying non-layer blob %q to image", blob)
|
return errors.Wrapf(err, "copying non-layer blob %q to image", blob)
|
||||||
}
|
}
|
||||||
if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
|
if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
|
||||||
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
|
|
||||||
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
|
|
||||||
}
|
|
||||||
logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
|
logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
|
||||||
return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID)
|
return errors.Wrapf(err, "saving big data %q for image %q", blob.String(), img.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Save the unparsedToplevel's manifest.
|
// Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below.
|
||||||
if len(toplevelManifest) != 0 {
|
if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
|
||||||
manifestDigest, err := manifest.Digest(toplevelManifest)
|
manifestDigest, err := manifest.Digest(toplevelManifest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "error digesting top-level manifest")
|
return errors.Wrapf(err, "digesting top-level manifest")
|
||||||
}
|
}
|
||||||
key := manifestBigDataKey(manifestDigest)
|
key := manifestBigDataKey(manifestDigest)
|
||||||
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
|
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
|
||||||
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
|
|
||||||
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
|
|
||||||
}
|
|
||||||
logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
|
logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
|
||||||
return errors.Wrapf(err, "error saving top-level manifest for image %q", img.ID)
|
return errors.Wrapf(err, "saving top-level manifest for image %q", img.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
|
// Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
|
||||||
// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
|
// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
|
||||||
// and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
|
// and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
|
||||||
manifestDigest, err := manifest.Digest(s.manifest)
|
key := manifestBigDataKey(s.manifestDigest)
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "error computing manifest digest")
|
|
||||||
}
|
|
||||||
key := manifestBigDataKey(manifestDigest)
|
|
||||||
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
|
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
|
||||||
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
|
|
||||||
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
|
|
||||||
}
|
|
||||||
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
|
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
|
||||||
return errors.Wrapf(err, "error saving manifest for image %q", img.ID)
|
return errors.Wrapf(err, "saving manifest for image %q", img.ID)
|
||||||
}
|
}
|
||||||
key = storage.ImageDigestBigDataKey
|
key = storage.ImageDigestBigDataKey
|
||||||
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
|
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
|
||||||
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
|
|
||||||
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
|
|
||||||
}
|
|
||||||
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
|
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
|
||||||
return errors.Wrapf(err, "error saving manifest for image %q", img.ID)
|
return errors.Wrapf(err, "saving manifest for image %q", img.ID)
|
||||||
}
|
}
|
||||||
// Save the signatures, if we have any.
|
// Save the signatures, if we have any.
|
||||||
if len(s.signatures) > 0 {
|
if len(s.signatures) > 0 {
|
||||||
if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
|
if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
|
||||||
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
|
|
||||||
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
|
|
||||||
}
|
|
||||||
logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
|
logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
|
||||||
return errors.Wrapf(err, "error saving signatures for image %q", img.ID)
|
return errors.Wrapf(err, "saving signatures for image %q", img.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for instanceDigest, signatures := range s.signatureses {
|
for instanceDigest, signatures := range s.signatureses {
|
||||||
key := signatureBigDataKey(instanceDigest)
|
key := signatureBigDataKey(instanceDigest)
|
||||||
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
|
if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
|
||||||
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
|
|
||||||
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
|
|
||||||
}
|
|
||||||
logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
|
logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
|
||||||
return errors.Wrapf(err, "error saving signatures for image %q", img.ID)
|
return errors.Wrapf(err, "saving signatures for image %q", img.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Save our metadata.
|
// Save our metadata.
|
||||||
metadata, err := json.Marshal(s)
|
metadata, err := json.Marshal(s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
|
|
||||||
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
|
|
||||||
}
|
|
||||||
logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
|
logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
|
||||||
return errors.Wrapf(err, "error encoding metadata for image %q", img.ID)
|
return errors.Wrapf(err, "encoding metadata for image %q", img.ID)
|
||||||
}
|
}
|
||||||
if len(metadata) != 0 {
|
if len(metadata) != 0 {
|
||||||
if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
|
if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
|
||||||
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
|
|
||||||
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
|
|
||||||
}
|
|
||||||
logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
|
logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
|
||||||
return errors.Wrapf(err, "error saving metadata for image %q", img.ID)
|
return errors.Wrapf(err, "saving metadata for image %q", img.ID)
|
||||||
}
|
}
|
||||||
logrus.Debugf("saved image metadata %q", string(metadata))
|
logrus.Debugf("saved image metadata %q", string(metadata))
|
||||||
}
|
}
|
||||||
|
@@ -1121,14 +1197,13 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		names = append(names, oldNames...)
 	}
 	if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil {
-		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
-			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
-		}
 		logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err)
-		return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID)
+		return errors.Wrapf(err, "setting names %v on image %q", names, img.ID)
 	}
 	logrus.Debugf("set names of image %q to %v", img.ID, names)
 	}

+	commitSucceeded = true
 	return nil
 }
@@ -1145,9 +1220,14 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string {

 // PutManifest writes the manifest to the destination.
 func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+	digest, err := manifest.Digest(manifestBlob)
+	if err != nil {
+		return err
+	}
 	newBlob := make([]byte, len(manifestBlob))
 	copy(newBlob, manifestBlob)
 	s.manifest = newBlob
+	s.manifestDigest = digest
 	return nil
 }
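With PutManifest now computing the digest once, Commit and PutSignatures can reuse s.manifestDigest instead of re-hashing the manifest at each call site. The same cache-once pattern with go-digest, reduced to a sketch (the struct and field names here are invented for illustration):

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// dest caches the manifest and its digest together, mirroring the
// storageImageDestination fields touched in the hunk above.
type dest struct {
	manifest       []byte
	manifestDigest digest.Digest
}

func (d *dest) putManifest(blob []byte) {
	d.manifest = append([]byte(nil), blob...) // defensive copy, as above
	d.manifestDigest = digest.FromBytes(blob) // hash once, reuse later
}

func main() {
	d := &dest{}
	d.putManifest([]byte(`{"schemaVersion":2}`))
	fmt.Println(d.manifestDigest) // sha256:…
}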
@@ -1189,13 +1269,10 @@ func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures
 	if instanceDigest == nil {
 		s.signatures = sigblob
 		s.SignatureSizes = sizes
-	}
-	if instanceDigest == nil && len(s.manifest) > 0 {
-		manifestDigest, err := manifest.Digest(s.manifest)
-		if err != nil {
-			return err
+		if len(s.manifest) > 0 {
+			manifestDigest := s.manifestDigest
+			instanceDigest = &manifestDigest
 		}
-		instanceDigest = &manifestDigest
 	}
 	if instanceDigest != nil {
 		s.signatureses[*instanceDigest] = sigblob
@@ -1211,12 +1288,12 @@ func (s *storageImageSource) getSize() (int64, error) {
 	// Size up the data blobs.
 	dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
 	if err != nil {
-		return -1, errors.Wrapf(err, "error reading image %q", s.image.ID)
+		return -1, errors.Wrapf(err, "reading image %q", s.image.ID)
 	}
 	for _, dataName := range dataNames {
 		bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
 		if err != nil {
-			return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.image.ID)
+			return -1, errors.Wrapf(err, "reading data blob size %q for %q", dataName, s.image.ID)
 		}
 		sum += bigSize
 	}
--- a/vendor/github.com/containers/image/v5/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -11,7 +11,6 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage"
 	digest "github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -62,18 +61,17 @@ func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
 	return false
 }

-// imageMatchesSystemContext checks if the passed-in image both contains a
-// manifest that matches the passed-in digest, and identifies itself as being
-// appropriate for running on the system that matches sys.
-// If we somehow ended up sharing the same storage among multiple types of
-// systems, and managed to download multiple images from the same manifest
-// list, their image records will all contain copies of the manifest list, and
-// this check will help us decide which of them we want to return when we've
-// been asked to resolve an image reference that uses the list's digest to a
-// specific image ID.
-func imageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
-	// First, check if the image record has a manifest that matches the
-	// specified digest.
+// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a
+// multi-arch manifest that matches the passed-in digest, and the image is the per-platform
+// image instance that matches sys.
+//
+// See the comment in storageReference.ResolveImage explaining why
+// this check is necessary.
+func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
+	// Load the manifest that matches the specified digest.
+	// We don't need to care about storage.ImageDigestBigDataKey because
+	// manifest lists are only stored into storage by c/image versions
+	// that know about manifestBigDataKey, and only using that key.
 	key := manifestBigDataKey(manifestDigest)
 	manifestBytes, err := store.ImageBigData(img.ID, key)
 	if err != nil {

@@ -83,56 +81,22 @@ func imageMatchesSystemContext(store storage.Store, img *storage.Image, manifest
 	// the digest of the instance that matches the current system, and try
 	// to load that manifest from the image record, and use it.
 	manifestType := manifest.GuessMIMEType(manifestBytes)
-	if manifest.MIMETypeIsMultiImage(manifestType) {
-		list, err := manifest.ListFromBlob(manifestBytes, manifestType)
-		if err != nil {
-			return false
-		}
-		manifestDigest, err = list.ChooseInstance(sys)
-		if err != nil {
-			return false
-		}
-		key = manifestBigDataKey(manifestDigest)
-		manifestBytes, err = store.ImageBigData(img.ID, key)
-		if err != nil {
-			return false
-		}
-		manifestType = manifest.GuessMIMEType(manifestBytes)
+	if !manifest.MIMETypeIsMultiImage(manifestType) {
+		// manifestDigest directly specifies a per-platform image, so we aren't
+		// choosing among different variants.
+		return false
 	}
-	// Load the image's configuration blob.
-	m, err := manifest.FromBlob(manifestBytes, manifestType)
+	list, err := manifest.ListFromBlob(manifestBytes, manifestType)
 	if err != nil {
 		return false
 	}
-	getConfig := func(blobInfo types.BlobInfo) ([]byte, error) {
-		return store.ImageBigData(img.ID, blobInfo.Digest.String())
-	}
-	ii, err := m.Inspect(getConfig)
+	chosenInstance, err := list.ChooseInstance(sys)
 	if err != nil {
 		return false
 	}
-	// Build a dummy index containing one instance and information about
-	// the image's target system from the image's configuration.
-	index := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{{
-		MediaType: imgspecv1.MediaTypeImageManifest,
-		Digest:    manifestDigest,
-		Size:      int64(len(manifestBytes)),
-		Platform: &imgspecv1.Platform{
-			OS:           ii.Os,
-			Architecture: ii.Architecture,
-		},
-	}}, nil)
-	// Check that ChooseInstance() would select this image for this system,
-	// from a list of images.
-	instanceDigest, err := index.ChooseInstance(sys)
-	if err != nil {
-		return false
-	}
-	// Double-check that we can read the runnable image's manifest from the
-	// image record.
-	key = manifestBigDataKey(instanceDigest)
+	key = manifestBigDataKey(chosenInstance)
 	_, err = store.ImageBigData(img.ID, key)
-	return err == nil
+	return err == nil // true if img.ID is based on chosenInstance.
 }

 // Resolve the reference's name to an image ID in the store, if there's already
@@ -152,11 +116,24 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
 		// Look for an image with the specified digest that has the same name,
 		// though possibly with a different tag or digest, as a Name value, so
 		// that the canonical reference can be implicitly resolved to the image.
+		//
+		// Typically there should be at most one such image, because the same
+		// manifest digest implies the same config, and we choose the storage ID
+		// based on the config (deduplicating images), except:
+		// - the user can explicitly specify an ID when creating the image.
+		//   In this case we don't have a preference among the alternatives.
+		// - when pulling an image from a multi-platform manifest list, we also
+		//   store the manifest list in the image; this allows referencing a
+		//   per-platform image using the manifest list digest, but that also
+		//   means that we can have multiple genuinely different images in the
+		//   storage matching the same manifest list digest (if pulled using different
+		//   SystemContext.{OS,Architecture,Variant}Choice to the same storage).
+		//   In this case we prefer the image matching the current SystemContext.
 		images, err := s.transport.store.ImagesByDigest(digested.Digest())
 		if err == nil && len(images) > 0 {
 			for _, image := range images {
 				if imageMatchesRepo(image, s.named) {
-					if loadedImage == nil || imageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) {
+					if loadedImage == nil || multiArchImageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) {
 						loadedImage = image
 						s.id = image.ID
 					}
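The comment block added above explains why several stored images can match one manifest-list digest; the loop then keeps the first repository match but lets a candidate that also matches the current platform override it. The same selection shape, reduced to a self-contained sketch (all types here are invented for illustration):

package main

import "fmt"

type img struct {
	id            string
	matchesRepo   bool
	matchesSystem bool
}

// pick mirrors the loop above: remember the first repository match, but let a
// candidate that also matches the running system win.
func pick(candidates []img) *img {
	var chosen *img
	for i := range candidates {
		c := &candidates[i]
		if !c.matchesRepo {
			continue
		}
		if chosen == nil || c.matchesSystem {
			chosen = c
		}
	}
	return chosen
}

func main() {
	imgs := []img{
		{id: "amd64-image", matchesRepo: true, matchesSystem: false},
		{id: "arm64-image", matchesRepo: true, matchesSystem: true},
	}
	fmt.Println(pick(imgs).id) // arm64-image
}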
@@ -172,7 +149,7 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
 	if loadedImage == nil {
 		img, err := s.transport.store.Image(s.id)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error reading image %q", s.id)
+			return nil, errors.Wrapf(err, "reading image %q", s.id)
 		}
 		loadedImage = img
 	}
--- a/vendor/github.com/containers/image/v5/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -172,7 +172,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
 		var err error
 		named, err = reference.ParseNormalizedNamed(ref)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
+			return nil, errors.Wrapf(err, "parsing named reference %q", ref)
 		}
 		named = reference.TagNameOnly(named)
 	}

@@ -303,7 +303,7 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe
 	}
 	if sref, ok := ref.(*storageReference); ok {
 		tmpRef := *sref
-		if img, err := tmpRef.resolveImage(&types.SystemContext{}); err == nil {
+		if img, err := tmpRef.resolveImage(nil); err == nil {
 			return img, nil
 		}
 	}
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -334,6 +334,9 @@ type ImageDestination interface {
 	// MUST be called after PutManifest (signatures may reference manifest contents).
 	PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
 	// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+	// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+	// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+	// original manifest list digest, if desired.
 	// WARNING: This does not have any transactional semantics:
 	// - Uploaded data MAY be visible to others before Commit() is called
 	// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
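The ordering constraints spelled out in these interface comments (PutSignatures after PutManifest, Commit last, Close always) read naturally as a driver loop. A hedged sketch of a caller honoring them (the helper function is invented; only the types.ImageDestination methods are from the interface above, and real callers such as c/image's copy package do much more):

package sketch

import (
	"context"

	"github.com/containers/image/v5/types"
)

// copyToDestination sketches the call ordering implied by the comments above.
func copyToDestination(ctx context.Context, dest types.ImageDestination, unparsed types.UnparsedImage, manifestBlob []byte, sigs [][]byte) error {
	defer dest.Close() // without Commit, rollback is allowed but not guaranteed

	if err := dest.PutManifest(ctx, manifestBlob, nil); err != nil {
		return err
	}
	// Signatures may reference manifest contents, so this MUST follow PutManifest.
	if err := dest.PutSignatures(ctx, sigs, nil); err != nil {
		return err
	}
	// Nothing is durably recorded until Commit succeeds.
	return dest.Commit(ctx, unparsed)
}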
@@ -595,12 +598,12 @@ type SystemContext struct {
 	// === docker.Transport overrides ===
 	// If not "", a directory containing a CA certificate (ending with ".crt"),
 	// a client certificate (ending with ".cert") and a client certificate key
-	// (ending with ".key") used when talking to a Docker Registry.
+	// (ending with ".key") used when talking to a container registry.
 	DockerCertPath string
 	// If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
 	// Ignored if DockerCertPath is non-empty.
 	DockerPerHostCertDirPath string
-	// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	// Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
 	DockerInsecureSkipTLSVerify OptionalBool
 	// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
 	// Ignored if DockerBearerRegistryToken is non-empty.
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,9 +6,9 @@ const (
 	// VersionMajor is for an API incompatible changes
 	VersionMajor = 5
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 13
+	VersionMinor = 14
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 2
+	VersionPatch = 0

 	// VersionDev indicates development branch. Releases will be empty string.
 	VersionDev = ""
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go (new file, generated, vendored)
@@ -0,0 +1,169 @@
+package chunked
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"github.com/containers/storage/pkg/chunked/compressor"
+	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/klauspost/compress/zstd"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/archive/tar"
+)
+
+const (
+	TypeReg     = internal.TypeReg
+	TypeChunk   = internal.TypeChunk
+	TypeLink    = internal.TypeLink
+	TypeChar    = internal.TypeChar
+	TypeBlock   = internal.TypeBlock
+	TypeDir     = internal.TypeDir
+	TypeFifo    = internal.TypeFifo
+	TypeSymlink = internal.TypeSymlink
+)
+
+var typesToTar = map[string]byte{
+	TypeReg:     tar.TypeReg,
+	TypeLink:    tar.TypeLink,
+	TypeChar:    tar.TypeChar,
+	TypeBlock:   tar.TypeBlock,
+	TypeDir:     tar.TypeDir,
+	TypeFifo:    tar.TypeFifo,
+	TypeSymlink: tar.TypeSymlink,
+}
+
+func typeToTarType(t string) (byte, error) {
+	r, found := typesToTar[t]
+	if !found {
+		return 0, fmt.Errorf("unknown type: %v", t)
+	}
+	return r, nil
+}
+
+func isZstdChunkedFrameMagic(data []byte) bool {
+	if len(data) < 8 {
+		return false
+	}
+	return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
+}
+
+// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.  The blob total size must
+// be specified.
+// This function uses the io.containers.zstd-chunked. annotations when specified.
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, error) {
+	footerSize := int64(internal.FooterSizeSupported)
+	if blobSize <= footerSize {
+		return nil, errors.New("blob too small")
+	}
+
+	manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
+	if manifestChecksumAnnotation == "" {
+		return nil, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
+	}
+
+	var offset, length, lengthUncompressed, manifestType uint64
+
+	if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
+		if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
+			return nil, err
+		}
+	} else {
+		chunk := ImageSourceChunk{
+			Offset: uint64(blobSize - footerSize),
+			Length: uint64(footerSize),
+		}
+		parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+		if err != nil {
+			return nil, err
+		}
+		var reader io.ReadCloser
+		select {
+		case r := <-parts:
+			reader = r
+		case err := <-errs:
+			return nil, err
+		}
+		footer := make([]byte, footerSize)
+		if _, err := io.ReadFull(reader, footer); err != nil {
+			return nil, err
+		}
+
+		offset = binary.LittleEndian.Uint64(footer[0:8])
+		length = binary.LittleEndian.Uint64(footer[8:16])
+		lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
+		manifestType = binary.LittleEndian.Uint64(footer[24:32])
+		if !isZstdChunkedFrameMagic(footer[32:40]) {
+			return nil, errors.New("invalid magic number")
+		}
+	}
+
+	if manifestType != internal.ManifestTypeCRFS {
+		return nil, errors.New("invalid manifest type")
+	}
+
+	// set a reasonable limit
+	if length > (1<<20)*50 {
+		return nil, errors.New("manifest too big")
+	}
+	if lengthUncompressed > (1<<20)*50 {
+		return nil, errors.New("manifest too big")
+	}
+
+	chunk := ImageSourceChunk{
+		Offset: offset,
+		Length: length,
+	}
+
+	parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+	if err != nil {
+		return nil, err
+	}
+	var reader io.ReadCloser
+	select {
+	case r := <-parts:
+		reader = r
+	case err := <-errs:
+		return nil, err
+	}
+
+	manifest := make([]byte, length)
+	if _, err := io.ReadFull(reader, manifest); err != nil {
+		return nil, err
+	}
+
+	manifestDigester := digest.Canonical.Digester()
+	manifestChecksum := manifestDigester.Hash()
+	if _, err := manifestChecksum.Write(manifest); err != nil {
+		return nil, err
+	}
+
+	d, err := digest.Parse(manifestChecksumAnnotation)
+	if err != nil {
+		return nil, err
+	}
+	if manifestDigester.Digest() != d {
+		return nil, errors.New("invalid manifest checksum")
+	}
+
+	decoder, err := zstd.NewReader(nil)
+	if err != nil {
+		return nil, err
+	}
+	defer decoder.Close()
+
+	b := make([]byte, 0, lengthUncompressed)
+	if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
+		return decoded, nil
+	}
+
+	return manifest, nil
+}
+
+// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return compressor.ZstdCompressor(r, metadata, level)
+}
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go (new file, 220 lines, generated, vendored)
@@ -0,0 +1,220 @@
package compressor

// NOTE: This is used from github.com/containers/image by callers that
// don't otherwise use containers/storage, so don't make this depend on any
// larger software like the graph drivers.

import (
	"encoding/base64"
	"io"
	"io/ioutil"

	"github.com/containers/storage/pkg/chunked/internal"
	"github.com/containers/storage/pkg/ioutils"
	"github.com/opencontainers/go-digest"
	"github.com/vbatts/tar-split/archive/tar"
)

func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
	// total written so far.  Used to retrieve partial offsets in the file
	dest := ioutils.NewWriteCounter(destFile)

	tr := tar.NewReader(reader)
	tr.RawAccounting = true

	buf := make([]byte, 4096)

	zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
	if err != nil {
		return err
	}
	defer func() {
		if zstdWriter != nil {
			zstdWriter.Close()
			zstdWriter.Flush()
		}
	}()

	restartCompression := func() (int64, error) {
		var offset int64
		if zstdWriter != nil {
			if err := zstdWriter.Close(); err != nil {
				return 0, err
			}
			if err := zstdWriter.Flush(); err != nil {
				return 0, err
			}
			offset = dest.Count
			zstdWriter.Reset(dest)
		}
		return offset, nil
	}

	var metadata []internal.ZstdFileMetadata
	for {
		hdr, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}

		rawBytes := tr.RawBytes()
		if _, err := zstdWriter.Write(rawBytes); err != nil {
			return err
		}
		payloadDigester := digest.Canonical.Digester()
		payloadChecksum := payloadDigester.Hash()

		payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)

		// Now handle the payload, if any
		var startOffset, endOffset int64
		checksum := ""
		for {
			read, errRead := tr.Read(buf)
			if errRead != nil && errRead != io.EOF {
				return errRead
			}

			// restart the compression only if there is
			// a payload.
			if read > 0 {
				if startOffset == 0 {
					startOffset, err = restartCompression()
					if err != nil {
						return err
					}
				}
				_, err := payloadDest.Write(buf[:read])
				if err != nil {
					return err
				}
			}
			if errRead == io.EOF {
				if startOffset > 0 {
					endOffset, err = restartCompression()
					if err != nil {
						return err
					}
					checksum = payloadDigester.Digest().String()
				}
				break
			}
		}

		typ, err := internal.GetType(hdr.Typeflag)
		if err != nil {
			return err
		}
		xattrs := make(map[string]string)
		for k, v := range hdr.Xattrs {
			xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
		}
		m := internal.ZstdFileMetadata{
			Type:       typ,
			Name:       hdr.Name,
			Linkname:   hdr.Linkname,
			Mode:       hdr.Mode,
			Size:       hdr.Size,
			UID:        hdr.Uid,
			GID:        hdr.Gid,
			ModTime:    hdr.ModTime,
			AccessTime: hdr.AccessTime,
			ChangeTime: hdr.ChangeTime,
			Devmajor:   hdr.Devmajor,
			Devminor:   hdr.Devminor,
			Xattrs:     xattrs,
			Digest:     checksum,
			Offset:     startOffset,
			EndOffset:  endOffset,

			// ChunkSize is 0 for the last chunk
			ChunkSize:   0,
			ChunkOffset: 0,
			ChunkDigest: checksum,
		}
		metadata = append(metadata, m)
	}

	rawBytes := tr.RawBytes()
	if _, err := zstdWriter.Write(rawBytes); err != nil {
		return err
	}
	if err := zstdWriter.Flush(); err != nil {
		return err
	}
	if err := zstdWriter.Close(); err != nil {
		return err
	}
	zstdWriter = nil

	return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
}

type zstdChunkedWriter struct {
	tarSplitOut *io.PipeWriter
	tarSplitErr chan error
}

func (w zstdChunkedWriter) Close() error {
	err := <-w.tarSplitErr
	if err != nil {
		w.tarSplitOut.Close()
		return err
	}
	return w.tarSplitOut.Close()
}

func (w zstdChunkedWriter) Write(p []byte) (int, error) {
	select {
	case err := <-w.tarSplitErr:
		w.tarSplitOut.Close()
		return 0, err
	default:
		return w.tarSplitOut.Write(p)
	}
}

// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
// compressed separately so it can be addressed separately.  Idea based on CRFS:
// https://github.com/google/crfs
// The difference with CRFS is that the zstd compression is used instead of gzip.
// The reason for it is that zstd supports embedding metadata ignored by the decoder
// as part of the compressed stream.
// A manifest json file with all the metadata is appended at the end of the tarball
// stream, using zstd skippable frames.
// The final file will look like:
// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
// Where:
// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
	ch := make(chan error, 1)
	r, w := io.Pipe()

	go func() {
		ch <- writeZstdChunkedStream(out, metadata, r, level)
		io.Copy(ioutil.Discard, r)
		r.Close()
		close(ch)
	}()

	return zstdChunkedWriter{
		tarSplitOut: w,
		tarSplitErr: ch,
	}, nil
}

// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
	if level == nil {
		l := 3
		level = &l
	}

	return zstdChunkedWriterWithLevel(r, metadata, *level)
}
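A short usage sketch for the writer above, assuming the vendored package is importable at its canonical path github.com/containers/storage/pkg/chunked/compressor: it compresses a one-file tar stream into the zstd:chunked layout and prints the manifest-position annotation that the writer records.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"

	"github.com/containers/storage/pkg/chunked/compressor"
)

func main() {
	// Build a one-file tar stream in memory.
	var tarBuf bytes.Buffer
	tw := tar.NewWriter(&tarBuf)
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: 5}); err != nil {
		panic(err)
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		panic(err)
	}
	tw.Close()

	// Recompress it into the chunked layout described above.
	var blob bytes.Buffer
	annotations := map[string]string{}
	w, err := compressor.ZstdCompressor(&blob, annotations, nil) // nil level defaults to 3
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, &tarBuf); err != nil {
		panic(err)
	}
	// Close flushes the manifest and footer skippable frames and fills annotations.
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println(annotations["io.containers.zstd-chunked.manifest-position"])
}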
172 vendor/github.com/containers/storage/pkg/chunked/internal/compression.go generated vendored Normal file
@@ -0,0 +1,172 @@
package internal

// NOTE: This is used from github.com/containers/image by callers that
// don't otherwise use containers/storage, so don't make this depend on any
// larger software like the graph drivers.

import (
	"archive/tar"
	"bytes"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"time"

	"github.com/klauspost/compress/zstd"
	"github.com/opencontainers/go-digest"
)

type ZstdTOC struct {
	Version int                `json:"version"`
	Entries []ZstdFileMetadata `json:"entries"`
}

type ZstdFileMetadata struct {
	Type       string            `json:"type"`
	Name       string            `json:"name"`
	Linkname   string            `json:"linkName,omitempty"`
	Mode       int64             `json:"mode,omitempty"`
	Size       int64             `json:"size"`
	UID        int               `json:"uid"`
	GID        int               `json:"gid"`
	ModTime    time.Time         `json:"modtime"`
	AccessTime time.Time         `json:"accesstime"`
	ChangeTime time.Time         `json:"changetime"`
	Devmajor   int64             `json:"devMajor"`
	Devminor   int64             `json:"devMinor"`
	Xattrs     map[string]string `json:"xattrs,omitempty"`
	Digest     string            `json:"digest,omitempty"`
	Offset     int64             `json:"offset,omitempty"`
	EndOffset  int64             `json:"endOffset,omitempty"`

	// Currently chunking is not supported.
	ChunkSize   int64  `json:"chunkSize,omitempty"`
	ChunkOffset int64  `json:"chunkOffset,omitempty"`
	ChunkDigest string `json:"chunkDigest,omitempty"`
}

const (
	TypeReg     = "reg"
	TypeChunk   = "chunk"
	TypeLink    = "hardlink"
	TypeChar    = "char"
	TypeBlock   = "block"
	TypeDir     = "dir"
	TypeFifo    = "fifo"
	TypeSymlink = "symlink"
)

var TarTypes = map[byte]string{
	tar.TypeReg:     TypeReg,
	tar.TypeRegA:    TypeReg,
	tar.TypeLink:    TypeLink,
	tar.TypeChar:    TypeChar,
	tar.TypeBlock:   TypeBlock,
	tar.TypeDir:     TypeDir,
	tar.TypeFifo:    TypeFifo,
	tar.TypeSymlink: TypeSymlink,
}

func GetType(t byte) (string, error) {
	r, found := TarTypes[t]
	if !found {
		return "", fmt.Errorf("unknown tarball type: %v", t)
	}
	return r, nil
}

const (
	ManifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
	ManifestInfoKey     = "io.containers.zstd-chunked.manifest-position"

	// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
	ManifestTypeCRFS = 1

	// FooterSizeSupported is the footer size supported by this implementation.
	// Newer versions of the image format might increase this value, so reject
	// any version that is not supported.
	FooterSizeSupported = 40
)

var (
	// when the zstd decoder encounters a skippable frame + 1 byte for the size, it
	// will ignore it.
	// https://tools.ietf.org/html/rfc8478#section-3.1.2
	skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}

	ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
)

func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
	if _, err := dest.Write(skippableFrameMagic); err != nil {
		return err
	}

	var size []byte = make([]byte, 4)
	binary.LittleEndian.PutUint32(size, uint32(len(data)))
	if _, err := dest.Write(size); err != nil {
		return err
	}
	if _, err := dest.Write(data); err != nil {
		return err
	}
	return nil
}

func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []ZstdFileMetadata, level int) error {
	// 8 is the size of the zstd skippable frame header + the frame size
	manifestOffset := offset + 8

	toc := ZstdTOC{
		Version: 1,
		Entries: metadata,
	}

	// Generate the manifest
	manifest, err := json.Marshal(toc)
	if err != nil {
		return err
	}

	var compressedBuffer bytes.Buffer
	zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level)
	if err != nil {
		return err
	}
	if _, err := zstdWriter.Write(manifest); err != nil {
		zstdWriter.Close()
		return err
	}
	if err := zstdWriter.Close(); err != nil {
		return err
	}
	compressedManifest := compressedBuffer.Bytes()

	manifestDigester := digest.Canonical.Digester()
	manifestChecksum := manifestDigester.Hash()
	if _, err := manifestChecksum.Write(compressedManifest); err != nil {
		return err
	}

	outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String()
	outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS)
	if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
		return err
	}

	// Store the offset to the manifest and its size in LE order
	var manifestDataLE []byte = make([]byte, FooterSizeSupported)
	binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
	binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
	binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
	binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS))
	copy(manifestDataLE[32:], ZstdChunkedFrameMagic)

	return appendZstdSkippableFrame(dest, manifestDataLE)
}

func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
	el := zstd.EncoderLevelFromZstd(level)
	return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
}
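The final skippable frame carries the 40-byte footer laid out by WriteZstdChunkedManifest above. A minimal sketch of decoding it, assuming footer already holds the frame payload (parseFooter is an illustrative name, not part of the package):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// Mirrors ZstdChunkedFrameMagic above.
var magic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}

func parseFooter(footer []byte) (offset, length, lengthUncompressed, manifestType uint64, err error) {
	if len(footer) != 40 { // FooterSizeSupported
		return 0, 0, 0, 0, fmt.Errorf("unexpected footer size %d", len(footer))
	}
	if !bytes.Equal(footer[32:], magic) {
		return 0, 0, 0, 0, fmt.Errorf("invalid magic number")
	}
	offset = binary.LittleEndian.Uint64(footer[0:8])
	length = binary.LittleEndian.Uint64(footer[8:16])
	lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
	manifestType = binary.LittleEndian.Uint64(footer[24:32])
	return offset, length, lengthUncompressed, manifestType, nil
}

func main() {
	// Round-trip: build a footer the same way WriteZstdChunkedManifest does.
	footer := make([]byte, 40)
	binary.LittleEndian.PutUint64(footer, 4096)
	binary.LittleEndian.PutUint64(footer[8:], 512)
	binary.LittleEndian.PutUint64(footer[16:], 2048)
	binary.LittleEndian.PutUint64(footer[24:], 1) // ManifestTypeCRFS
	copy(footer[32:], magic)
	fmt.Println(parseFooter(footer)) // 4096 512 2048 1 <nil>
}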
@@ -0,0 +1,26 @@
package chunked

import (
	"fmt"
	"io"
)

// ImageSourceChunk is a portion of a blob.
type ImageSourceChunk struct {
	Offset uint64
	Length uint64
}

// ImageSourceSeekable is an image source that permits fetching chunks of the entire blob.
type ImageSourceSeekable interface {
	// GetBlobAt returns a stream for the specified blob.
	GetBlobAt([]ImageSourceChunk) (chan io.ReadCloser, chan error, error)
}

// ErrBadRequest is returned when the request is not valid
type ErrBadRequest struct {
}

func (e ErrBadRequest) Error() string {
	return fmt.Sprintf("bad request")
}
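A hedged sketch of a local-file implementation of the GetBlobAt protocol above, useful for testing; fileSeekable and the package name are illustrative, not part of this diff. It streams each requested chunk in order and then closes both channels, which is the termination signal the consumer in storage_linux.go (further down) relies on.

package chunkeddemo

import (
	"io"
	"io/ioutil"
	"os"
)

type ImageSourceChunk struct {
	Offset uint64
	Length uint64
}

type fileSeekable struct {
	f *os.File
}

func (s fileSeekable) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	parts := make(chan io.ReadCloser)
	errs := make(chan error)
	go func() {
		defer close(parts)
		defer close(errs)
		for _, c := range chunks {
			// Serve each chunk as an independent reader over the file range.
			parts <- ioutil.NopCloser(io.NewSectionReader(s.f, int64(c.Offset), int64(c.Length)))
		}
	}()
	return parts, errs, nil
}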
875 vendor/github.com/containers/storage/pkg/chunked/storage_linux.go generated vendored Normal file
@@ -0,0 +1,875 @@
package chunked

import (
	archivetar "archive/tar"
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
	driversCopy "github.com/containers/storage/drivers/copy"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chunked/internal"
	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/types"
	"github.com/klauspost/compress/zstd"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/vbatts/tar-split/archive/tar"
	"golang.org/x/sys/unix"
)

const (
	maxNumberMissingChunks  = 1024
	newFileFlags            = (unix.O_CREAT | unix.O_TRUNC | unix.O_WRONLY | unix.O_EXCL)
	containersOverrideXattr = "user.containers.override_stat"
	bigDataKey              = "zstd-chunked-manifest"
)

type chunkedZstdDiffer struct {
	stream         ImageSourceSeekable
	manifest       []byte
	layersMetadata map[string][]internal.ZstdFileMetadata
	layersTarget   map[string]string
}

func timeToTimespec(time time.Time) (ts unix.Timespec) {
	if time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return unix.NsecToTimespec(time.UnixNano())
}

func copyFileContent(src, destFile, root string, dirfd int, missingDirsMode, mode os.FileMode) (*os.File, int64, error) {
	st, err := os.Stat(src)
	if err != nil {
		return nil, -1, err
	}

	copyWithFileRange, copyWithFileClone := true, true

	// If the destination file already exists, we shouldn't blow it away
	dstFile, err := openFileUnderRoot(destFile, root, dirfd, newFileFlags, mode)
	if err != nil {
		return nil, -1, err
	}

	err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
	if err != nil {
		dstFile.Close()
		return nil, -1, err
	}
	return dstFile, st.Size(), err
}

func prepareOtherLayersCache(layersMetadata map[string][]internal.ZstdFileMetadata) map[string]map[string]*internal.ZstdFileMetadata {
	maps := make(map[string]map[string]*internal.ZstdFileMetadata)

	for layerID, v := range layersMetadata {
		r := make(map[string]*internal.ZstdFileMetadata)
		for i := range v {
			r[v[i].Digest] = &v[i]
		}
		maps[layerID] = r
	}
	return maps
}

func getLayersCache(store storage.Store) (map[string][]internal.ZstdFileMetadata, map[string]string, error) {
	allLayers, err := store.Layers()
	if err != nil {
		return nil, nil, err
	}

	layersMetadata := make(map[string][]internal.ZstdFileMetadata)
	layersTarget := make(map[string]string)
	for _, r := range allLayers {
		manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
		if err != nil {
			continue
		}
		defer manifestReader.Close()
		manifest, err := ioutil.ReadAll(manifestReader)
		if err != nil {
			return nil, nil, err
		}
		var toc internal.ZstdTOC
		if err := json.Unmarshal(manifest, &toc); err != nil {
			continue
		}
		layersMetadata[r.ID] = toc.Entries
		target, err := store.DifferTarget(r.ID)
		if err != nil {
			return nil, nil, err
		}
		layersTarget[r.ID] = target
	}

	return layersMetadata, layersTarget, nil
}

// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
	if _, ok := annotations[internal.ManifestChecksumKey]; ok {
		return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss)
	}
	return nil, errors.New("blob type not supported for partial retrieval")
}

func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedZstdDiffer, error) {
	manifest, err := readZstdChunkedManifest(iss, blobSize, annotations)
	if err != nil {
		return nil, err
	}
	layersMetadata, layersTarget, err := getLayersCache(store)
	if err != nil {
		return nil, err
	}

	return &chunkedZstdDiffer{
		stream:         iss,
		manifest:       manifest,
		layersMetadata: layersMetadata,
		layersTarget:   layersTarget,
	}, nil
}

func findFileInOtherLayers(file internal.ZstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*internal.ZstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
	// this is ugly, needs to be indexed
	for layerID, checksums := range layersMetadata {
		m, found := checksums[file.Digest]
		if !found {
			continue
		}

		source, ok := layersTarget[layerID]
		if !ok {
			continue
		}

		srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
		if err != nil {
			continue
		}
		defer unix.Close(srcDirfd)

		srcFile, err := openFileUnderRoot(m.Name, source, srcDirfd, unix.O_RDONLY, 0)
		if err != nil {
			continue
		}
		defer srcFile.Close()

		srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFile.Fd())

		dstFile, written, err := copyFileContent(srcPath, file.Name, root, dirfd, missingDirsMode, 0)
		if err != nil {
			continue
		}
		return dstFile, written, nil
	}
	return nil, 0, nil
}

func getFileDigest(f *os.File) (digest.Digest, error) {
	digester := digest.Canonical.Digester()
	if _, err := io.Copy(digester.Hash(), f); err != nil {
		return "", err
	}
	return digester.Digest(), nil
}

// findFileOnTheHost checks whether the requested file already exists on the host and copies the file content from there if possible.
// It is currently implemented to look only at the file with the same path.  Ideally it can detect the same content also at different
// paths.
func findFileOnTheHost(file internal.ZstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
	sourceFile := filepath.Clean(filepath.Join("/", file.Name))
	if !strings.HasPrefix(sourceFile, "/usr/") {
		// limit host deduplication to files under /usr.
		return nil, 0, nil
	}

	st, err := os.Stat(sourceFile)
	if err != nil || !st.Mode().IsRegular() {
		return nil, 0, nil
	}

	if st.Size() != file.Size {
		return nil, 0, nil
	}

	fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
	if err != nil {
		return nil, 0, nil
	}

	f := os.NewFile(uintptr(fd), "fd")
	defer f.Close()

	manifestChecksum, err := digest.Parse(file.Digest)
	if err != nil {
		return nil, 0, err
	}

	checksum, err := getFileDigest(f)
	if err != nil {
		return nil, 0, err
	}

	if checksum != manifestChecksum {
		return nil, 0, nil
	}

	dstFile, written, err := copyFileContent(fmt.Sprintf("/proc/self/fd/%d", fd), file.Name, root, dirfd, missingDirsMode, 0)
	if err != nil {
		return nil, 0, nil
	}

	// calculate the checksum again to make sure the file wasn't modified while it was copied
	if _, err := f.Seek(0, 0); err != nil {
		return nil, 0, err
	}
	checksum, err = getFileDigest(f)
	if err != nil {
		return nil, 0, err
	}
	if checksum != manifestChecksum {
		return nil, 0, nil
	}
	return dstFile, written, nil
}

func maybeDoIDRemap(manifest []internal.ZstdFileMetadata, options *archive.TarOptions) error {
	if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
		return nil
	}

	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)

	for i := range manifest {
		if options.ChownOpts != nil {
			manifest[i].UID = options.ChownOpts.UID
			manifest[i].GID = options.ChownOpts.GID
		} else {
			pair := idtools.IDPair{
				UID: manifest[i].UID,
				GID: manifest[i].GID,
			}
			var err error
			manifest[i].UID, manifest[i].GID, err = idMappings.ToContainer(pair)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

type missingFile struct {
	File *internal.ZstdFileMetadata
	Gap  int64
}

func (m missingFile) Length() int64 {
	return m.File.EndOffset - m.File.Offset
}

type missingChunk struct {
	RawChunk ImageSourceChunk
	Files    []missingFile
}

func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
	if file == nil || file.Fd() < 0 {
		return errors.Errorf("invalid file")
	}
	fd := int(file.Fd())

	t, err := typeToTarType(metadata.Type)
	if err != nil {
		return err
	}
	if t == tar.TypeSymlink {
		return nil
	}

	if err := unix.Fchown(fd, metadata.UID, metadata.GID); err != nil {
		if !options.IgnoreChownErrors {
			return err
		}
	}

	for k, v := range metadata.Xattrs {
		data, err := base64.StdEncoding.DecodeString(v)
		if err != nil {
			return err
		}
		if err := unix.Fsetxattr(fd, k, data, 0); err != nil {
			return err
		}
	}

	ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)}
	if err := unix.UtimesNanoAt(fd, "", ts, 0); err != nil && errors.Is(err, unix.ENOSYS) {
		return err
	}

	if err := unix.Fchmod(fd, uint32(mode)); err != nil {
		return err
	}
	return nil
}

func openFileUnderRoot(name, root string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) {
	how := unix.OpenHow{
		Flags:   flags,
		Mode:    uint64(mode & 07777),
		Resolve: unix.RESOLVE_IN_ROOT,
	}

	fd, err := unix.Openat2(dirfd, name, &how)
	if err != nil {
		return nil, err
	}
	return os.NewFile(uintptr(fd), name), nil
}
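openFileUnderRoot above is the containment primitive for everything that follows: with RESOLVE_IN_ROOT, openat2(2) resolves "../" and absolute symlinks as if the directory behind dirfd were the filesystem root (Linux 5.6+). A minimal standalone sketch of that behavior; /var/lib/demo is an illustrative path, not one the package uses.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	dirfd, err := unix.Open("/var/lib/demo", unix.O_RDONLY|unix.O_PATH, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(dirfd)

	how := unix.OpenHow{
		Flags:   unix.O_RDONLY,
		Resolve: unix.RESOLVE_IN_ROOT,
	}
	// Resolved as if /var/lib/demo were the root, so this opens
	// /var/lib/demo/etc/passwd (if present), never the host's /etc/passwd.
	fd, err := unix.Openat2(dirfd, "../../etc/passwd", &how)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	unix.Close(fd)
}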
func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) (err error) {
	file, err := openFileUnderRoot(metadata.Name, dest, dirfd, newFileFlags, 0)
	if err != nil {
		return err
	}
	defer func() {
		err2 := file.Close()
		if err == nil {
			err = err2
		}
	}()

	z, err := zstd.NewReader(reader)
	if err != nil {
		return err
	}
	defer z.Close()

	digester := digest.Canonical.Digester()
	checksum := digester.Hash()
	_, err = z.WriteTo(io.MultiWriter(file, checksum))
	if err != nil {
		return err
	}
	manifestChecksum, err := digest.Parse(metadata.Digest)
	if err != nil {
		return err
	}
	if digester.Digest() != manifestChecksum {
		return fmt.Errorf("checksum mismatch for %q", dest)
	}
	return setFileAttrs(file, mode, metadata, options)
}

func storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, missingDirsMode os.FileMode, options *archive.TarOptions) error {
	for mc := 0; ; mc++ {
		var part io.ReadCloser
		select {
		case p := <-streams:
			part = p
		case err := <-errs:
			return err
		}
		if part == nil {
			if mc == len(missingChunks) {
				break
			}
			return errors.Errorf("invalid stream returned %d %d", mc, len(missingChunks))
		}
		if mc == len(missingChunks) {
			return errors.Errorf("too many chunks returned")
		}

		for _, mf := range missingChunks[mc].Files {
			if mf.Gap > 0 {
				limitReader := io.LimitReader(part, mf.Gap)
				_, err := io.Copy(ioutil.Discard, limitReader)
				if err != nil {
					return err
				}
				continue
			}

			limitReader := io.LimitReader(part, mf.Length())

			if err := createFileFromZstdStream(dest, dirfd, limitReader, missingDirsMode, os.FileMode(mf.File.Mode), mf.File, options); err != nil {
				part.Close()
				return err
			}
		}
		part.Close()
	}
	return nil
}

func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk {
	if len(missingChunks) <= target {
		return missingChunks
	}

	getGap := func(missingChunks []missingChunk, i int) int {
		prev := missingChunks[i-1].RawChunk.Offset + missingChunks[i-1].RawChunk.Length
		return int(missingChunks[i].RawChunk.Offset - prev)
	}

	// this implementation doesn't account for duplicates, so it could merge
	// more than necessary to reach the specified target.  Since target itself
	// is a heuristic value, it doesn't matter.
	var gaps []int
	for i := 1; i < len(missingChunks); i++ {
		gaps = append(gaps, getGap(missingChunks, i))
	}
	sort.Ints(gaps)

	toShrink := len(missingChunks) - target
	targetValue := gaps[toShrink-1]

	newMissingChunks := missingChunks[0:1]
	for i := 1; i < len(missingChunks); i++ {
		gap := getGap(missingChunks, i)
		if gap > targetValue {
			newMissingChunks = append(newMissingChunks, missingChunks[i])
		} else {
			prev := &newMissingChunks[len(newMissingChunks)-1]
			gapFile := missingFile{
				Gap: int64(gap),
			}
			prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length
			prev.Files = append(append(prev.Files, gapFile), missingChunks[i].Files...)
		}
	}

	return newMissingChunks
}

func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, missingChunks []missingChunk, missingDirsMode os.FileMode, options *archive.TarOptions) error {
	var chunksToRequest []ImageSourceChunk
	for _, c := range missingChunks {
		chunksToRequest = append(chunksToRequest, c.RawChunk)
	}

	// There are some missing files.  Prepare a multirange request for the missing chunks.
	var streams chan io.ReadCloser
	var err error
	var errs chan error
	for {
		streams, errs, err = input.stream.GetBlobAt(chunksToRequest)
		if err == nil {
			break
		}

		if _, ok := err.(ErrBadRequest); ok {
			requested := len(missingChunks)
			// If the server cannot handle at least 64 chunks in a single request, just give up.
			if requested < 64 {
				return err
			}

			// Merge more chunks to request
			missingChunks = mergeMissingChunks(missingChunks, requested/2)
			continue
		}
		return err
	}

	if err := storeMissingFiles(streams, errs, dest, dirfd, missingChunks, missingDirsMode, options); err != nil {
		return err
	}
	return nil
}

func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
	parent := filepath.Dir(metadata.Name)
	base := filepath.Base(metadata.Name)

	parentFd := dirfd
	if parent != "." {
		parentFile, err := openFileUnderRoot(parent, target, dirfd, unix.O_DIRECTORY|unix.O_PATH|unix.O_RDONLY, 0)
		if err != nil {
			return err
		}
		defer parentFile.Close()
		parentFd = int(parentFile.Fd())
	}

	if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil {
		if !os.IsExist(err) {
			return err
		}
	}

	file, err := openFileUnderRoot(metadata.Name, target, dirfd, unix.O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	return setFileAttrs(file, mode, metadata, options)
}

func safeLink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
	sourceFile, err := openFileUnderRoot(metadata.Linkname, target, dirfd, unix.O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer sourceFile.Close()

	destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
	destDirFd := dirfd
	if destDir != "." {
		f, err := openFileUnderRoot(destDir, target, dirfd, unix.O_RDONLY, 0)
		if err != nil {
			return err
		}
		defer f.Close()
		destDirFd = int(f.Fd())
	}

	err = unix.Linkat(int(sourceFile.Fd()), "", destDirFd, destBase, unix.AT_EMPTY_PATH)
	if err != nil {
		return err
	}

	newFile, err := openFileUnderRoot(metadata.Name, target, dirfd, unix.O_WRONLY, 0)
	if err != nil {
		return err
	}
	defer newFile.Close()

	return setFileAttrs(newFile, mode, metadata, options)
}

func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
	destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
	destDirFd := dirfd
	if destDir != "." {
		f, err := openFileUnderRoot(destDir, target, dirfd, unix.O_RDONLY, 0)
		if err != nil {
			return err
		}
		defer f.Close()
		destDirFd = int(f.Fd())
	}

	return unix.Symlinkat(metadata.Linkname, destDirFd, destBase)
}

type whiteoutHandler struct {
	Dirfd int
	Root  string
}

func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
	file, err := openFileUnderRoot(path, d.Root, d.Dirfd, unix.O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	return unix.Fsetxattr(int(file.Fd()), name, value, 0)
}

func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
	dir := filepath.Dir(path)
	base := filepath.Base(path)

	dirfd := d.Dirfd
	if dir != "" {
		dir, err := openFileUnderRoot(dir, d.Root, d.Dirfd, unix.O_RDONLY, 0)
		if err != nil {
			return err
		}
		defer dir.Close()

		dirfd = int(dir.Fd())
	}

	return unix.Mknodat(dirfd, base, mode, dev)
}

func checkChownErr(err error, name string, uid, gid int) error {
	if errors.Is(err, syscall.EINVAL) {
		return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name)
	}
	return err
}

func (d whiteoutHandler) Chown(path string, uid, gid int) error {
	file, err := openFileUnderRoot(path, d.Root, d.Dirfd, unix.O_PATH, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil {
		var stat unix.Stat_t
		if unix.Fstat(int(file.Fd()), &stat) == nil {
			if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) {
				return nil
			}
		}
		return checkChownErr(err, path, uid, gid)
	}
	return nil
}

type hardLinkToCreate struct {
	dest     string
	dirfd    int
	mode     os.FileMode
	metadata *internal.ZstdFileMetadata
}

func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
	bigData := map[string][]byte{
		bigDataKey: d.manifest,
	}
	output := graphdriver.DriverWithDifferOutput{
		Differ:  d,
		BigData: bigData,
	}

	storeOpts, err := types.DefaultStoreOptionsAutoDetectUID()
	if err != nil {
		return output, err
	}

	enableHostDedup := false
	if value := storeOpts.PullOptions["enable_host_deduplication"]; strings.ToLower(value) == "true" {
		enableHostDedup = true
	}

	// Generate the manifest
	var toc internal.ZstdTOC
	if err := json.Unmarshal(d.manifest, &toc); err != nil {
		return output, err
	}

	whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)

	var missingChunks []missingChunk
	var mergedEntries []internal.ZstdFileMetadata

	if err := maybeDoIDRemap(toc.Entries, options); err != nil {
		return output, err
	}

	for _, e := range toc.Entries {
		if e.Type == TypeChunk {
			l := len(mergedEntries)
			if l == 0 || mergedEntries[l-1].Type != TypeReg {
				return output, errors.New("chunk type without a regular file")
			}
			mergedEntries[l-1].EndOffset = e.EndOffset
			continue
		}
		mergedEntries = append(mergedEntries, e)
	}

	if options.ForceMask != nil {
		uid, gid, mode, err := archive.GetFileOwner(dest)
		if err == nil {
			value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
			if err := unix.Setxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
				return output, err
			}
		}
	}

	dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0)
	if err != nil {
		return output, err
	}
	defer unix.Close(dirfd)

	otherLayersCache := prepareOtherLayersCache(d.layersMetadata)

	missingDirsMode := os.FileMode(0700)
	if options.ForceMask != nil {
		missingDirsMode = *options.ForceMask
	}

	// hardlinks can point to missing files.  So create them after all files
	// are retrieved
	var hardLinks []hardLinkToCreate

	missingChunksSize, totalChunksSize := int64(0), int64(0)
	for i, r := range mergedEntries {
		if options.ForceMask != nil {
			value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777)
			r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
			r.Mode = int64(*options.ForceMask)
		}

		mode := os.FileMode(r.Mode)

		r.Name = filepath.Clean(r.Name)
		r.Linkname = filepath.Clean(r.Linkname)

		t, err := typeToTarType(r.Type)
		if err != nil {
			return output, err
		}
		if whiteoutConverter != nil {
			hdr := archivetar.Header{
				Typeflag: t,
				Name:     r.Name,
				Linkname: r.Linkname,
				Size:     r.Size,
				Mode:     r.Mode,
				Uid:      r.UID,
				Gid:      r.GID,
			}
			handler := whiteoutHandler{
				Dirfd: dirfd,
				Root:  dest,
			}
			writeFile, err := whiteoutConverter.ConvertReadWithHandler(&hdr, r.Name, &handler)
			if err != nil {
				return output, err
			}
			if !writeFile {
				continue
			}
		}
		switch t {
		case tar.TypeReg:
			// Create directly empty files.
			if r.Size == 0 {
				// Used to have a scope for cleanup.
				createEmptyFile := func() error {
					file, err := openFileUnderRoot(r.Name, dest, dirfd, newFileFlags, 0)
					if err != nil {
						return err
					}
					defer file.Close()
					if err := setFileAttrs(file, mode, &r, options); err != nil {
						return err
					}
					return nil
				}
				if err := createEmptyFile(); err != nil {
					return output, err
				}
				continue
			}

		case tar.TypeDir:
			if err := safeMkdir(dest, dirfd, mode, &r, options); err != nil {
				return output, err
			}
			continue

		case tar.TypeLink:
			dest := dest
			dirfd := dirfd
			mode := mode
			r := r
			hardLinks = append(hardLinks, hardLinkToCreate{
				dest:     dest,
				dirfd:    dirfd,
				mode:     mode,
				metadata: &r,
			})
			continue

		case tar.TypeSymlink:
			if err := safeSymlink(dest, dirfd, mode, &r, options); err != nil {
				return output, err
			}
			continue

		case tar.TypeChar:
		case tar.TypeBlock:
		case tar.TypeFifo:
			/* Ignore.  */
		default:
			return output, fmt.Errorf("invalid type %q", t)
		}

		totalChunksSize += r.Size

		dstFile, _, err := findFileInOtherLayers(r, dest, dirfd, otherLayersCache, d.layersTarget, missingDirsMode)
		if err != nil {
			return output, err
		}
		if dstFile != nil {
			if err := setFileAttrs(dstFile, mode, &r, options); err != nil {
				dstFile.Close()
				return output, err
			}
			dstFile.Close()
			continue
		}

		if enableHostDedup {
			dstFile, _, err = findFileOnTheHost(r, dest, dirfd, missingDirsMode)
			if err != nil {
				return output, err
			}
			if dstFile != nil {
				if err := setFileAttrs(dstFile, mode, &r, options); err != nil {
					dstFile.Close()
					return output, err
				}
				dstFile.Close()
				continue
			}
		}

		missingChunksSize += r.Size
		if t == tar.TypeReg {
			rawChunk := ImageSourceChunk{
				Offset: uint64(r.Offset),
				Length: uint64(r.EndOffset - r.Offset),
			}
			file := missingFile{
				File: &toc.Entries[i],
			}
			missingChunks = append(missingChunks, missingChunk{
				RawChunk: rawChunk,
				Files: []missingFile{
					file,
				},
			})
		}
	}
	// There are some missing files.  Prepare a multirange request for the missing chunks.
	if len(missingChunks) > 0 {
		missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks)
		if err := retrieveMissingFiles(d, dest, dirfd, missingChunks, missingDirsMode, options); err != nil {
			return output, err
		}
	}

	for _, m := range hardLinks {
		if err := safeLink(m.dest, m.dirfd, m.mode, m.metadata, options); err != nil {
			return output, err
		}
	}

	if totalChunksSize > 0 {
		logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingChunksSize, totalChunksSize, float32(missingChunksSize*100.0)/float32(totalChunksSize))
	}
	return output, nil
}
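A toy illustration of the gap-merging heuristic in mergeMissingChunks above, rewritten over plain (offset, length) pairs rather than the package's own types: the smallest inter-chunk gaps are absorbed first until the request count drops to the target.

package main

import (
	"fmt"
	"sort"
)

type span struct{ off, len uint64 }

func merge(spans []span, target int) []span {
	if len(spans) <= target {
		return spans
	}
	gap := func(i int) int { return int(spans[i].off - (spans[i-1].off + spans[i-1].len)) }

	var gaps []int
	for i := 1; i < len(spans); i++ {
		gaps = append(gaps, gap(i))
	}
	sort.Ints(gaps)
	cutoff := gaps[len(spans)-target-1] // gaps at or below this value get merged

	out := spans[:1]
	for i := 1; i < len(spans); i++ {
		if g := gap(i); g > cutoff {
			out = append(out, spans[i])
		} else {
			prev := &out[len(out)-1]
			prev.len += uint64(g) + spans[i].len // absorb the gap and the next span
		}
	}
	return out
}

func main() {
	spans := []span{{0, 10}, {12, 10}, {100, 10}, {111, 10}}
	fmt.Println(merge(spans, 2)) // [{0 22} {100 21}]
}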
16 vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go generated vendored Normal file
@@ -0,0 +1,16 @@
// +build !linux

package chunked

import (
	"context"

	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
	"github.com/pkg/errors"
)

// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
	return nil, errors.New("format not supported on this architecture")
}
@@ -0,0 +1,7 @@
// +build zos

package cwriter

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TCGETS
@@ -4,7 +4,7 @@ require (
 	github.com/VividCortex/ewma v1.2.0
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
 	github.com/mattn/go-runewidth v0.0.13
-	golang.org/x/sys v0.0.0-20210603125802-9665404d3644
+	golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
 )

 go 1.14
@@ -6,5 +6,5 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4
 github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -563,6 +563,7 @@ ccflags="$@"
 		$2 ~ /^KEYCTL_/ ||
 		$2 ~ /^PERF_/ ||
 		$2 ~ /^SECCOMP_MODE_/ ||
+		$2 ~ /^SEEK_/ ||
 		$2 ~ /^SPLICE_/ ||
 		$2 ~ /^SYNC_FILE_RANGE_/ ||
 		$2 !~ /^AUDIT_RECORD_MAGIC/ &&
@@ -13,6 +13,7 @@
 package unix

 import (
+	"fmt"
 	"runtime"
 	"syscall"
 	"unsafe"
@@ -398,6 +399,38 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) {
 	return x, err
 }

+func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) {
+	mib, err := sysctlmib(name)
+	if err != nil {
+		return nil, err
+	}
+
+	// Find size.
+	n := uintptr(0)
+	if err := sysctl(mib, nil, &n, nil, 0); err != nil {
+		return nil, err
+	}
+	if n == 0 {
+		return nil, nil
+	}
+	if n%SizeofKinfoProc != 0 {
+		return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
+	}
+
+	// Read into buffer of that size.
+	buf := make([]KinfoProc, n/SizeofKinfoProc)
+	if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil {
+		return nil, err
+	}
+	if n%SizeofKinfoProc != 0 {
+		return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
+	}
+
+	// The actual call may return less than the original reported required
+	// size so ensure we deal with that.
+	return buf[:n/SizeofKinfoProc], nil
+}
+
 //sys	sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)

 /*
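A short usage sketch for the SysctlKinfoProcSlice helper added above (darwin only); "kern.proc.all" is the standard sysctl name listing every process visible to the caller.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	procs, err := unix.SysctlKinfoProcSlice("kern.proc.all")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d processes\n", len(procs))
	if len(procs) > 0 {
		// KinfoProc embeds ExternProc (see the types added later in this diff).
		fmt.Println("first pid:", procs[0].Proc.P_pid)
	}
}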
@@ -1262,6 +1262,11 @@ const (
 	SCM_RIGHTS              = 0x1
 	SCM_TIMESTAMP           = 0x2
 	SCM_TIMESTAMP_MONOTONIC = 0x4
+	SEEK_CUR                = 0x1
+	SEEK_DATA               = 0x4
+	SEEK_END                = 0x2
+	SEEK_HOLE               = 0x3
+	SEEK_SET                = 0x0
 	SHUT_RD                 = 0x0
 	SHUT_RDWR               = 0x2
 	SHUT_WR                 = 0x1
@@ -1262,6 +1262,11 @@ const (
 	SCM_RIGHTS              = 0x1
 	SCM_TIMESTAMP           = 0x2
 	SCM_TIMESTAMP_MONOTONIC = 0x4
+	SEEK_CUR                = 0x1
+	SEEK_DATA               = 0x4
+	SEEK_END                = 0x2
+	SEEK_HOLE               = 0x3
+	SEEK_SET                = 0x0
 	SHUT_RD                 = 0x0
 	SHUT_RDWR               = 0x2
 	SHUT_WR                 = 0x1
@@ -1297,6 +1297,11 @@ const (
 	SCM_RIGHTS    = 0x1
 	SCM_TIMESTAMP = 0x2
 	SCM_TIME_INFO = 0x7
+	SEEK_CUR      = 0x1
+	SEEK_DATA     = 0x3
+	SEEK_END      = 0x2
+	SEEK_HOLE     = 0x4
+	SEEK_SET      = 0x0
 	SHUT_RD       = 0x0
 	SHUT_RDWR     = 0x2
 	SHUT_WR       = 0x1
@@ -1298,6 +1298,11 @@ const (
 	SCM_RIGHTS    = 0x1
 	SCM_TIMESTAMP = 0x2
 	SCM_TIME_INFO = 0x7
+	SEEK_CUR      = 0x1
+	SEEK_DATA     = 0x3
+	SEEK_END      = 0x2
+	SEEK_HOLE     = 0x4
+	SEEK_SET      = 0x0
 	SHUT_RD       = 0x0
 	SHUT_RDWR     = 0x2
 	SHUT_WR       = 0x1
@@ -1276,6 +1276,11 @@ const (
 	SCM_CREDS     = 0x3
 	SCM_RIGHTS    = 0x1
 	SCM_TIMESTAMP = 0x2
+	SEEK_CUR      = 0x1
+	SEEK_DATA     = 0x3
+	SEEK_END      = 0x2
+	SEEK_HOLE     = 0x4
+	SEEK_SET      = 0x0
 	SHUT_RD       = 0x0
 	SHUT_RDWR     = 0x2
 	SHUT_WR       = 0x1
@@ -1298,6 +1298,11 @@ const (
 	SCM_RIGHTS    = 0x1
 	SCM_TIMESTAMP = 0x2
 	SCM_TIME_INFO = 0x7
+	SEEK_CUR      = 0x1
+	SEEK_DATA     = 0x3
+	SEEK_END      = 0x2
+	SEEK_HOLE     = 0x4
+	SEEK_SET      = 0x0
 	SHUT_RD       = 0x0
 	SHUT_RDWR     = 0x2
 	SHUT_WR       = 0x1
@@ -2284,6 +2284,12 @@ const (
 	SECCOMP_MODE_FILTER = 0x2
 	SECCOMP_MODE_STRICT = 0x1
 	SECURITYFS_MAGIC    = 0x73636673
+	SEEK_CUR            = 0x1
+	SEEK_DATA           = 0x3
+	SEEK_END            = 0x2
+	SEEK_HOLE           = 0x4
+	SEEK_MAX            = 0x4
+	SEEK_SET            = 0x0
 	SELINUX_MAGIC       = 0xf97cff8c
 	SHUT_RD             = 0x0
 	SHUT_RDWR           = 0x2
@@ -535,3 +535,107 @@ type CtlInfo struct {
 	Id   uint32
 	Name [96]byte
 }
+
+const SizeofKinfoProc = 0x288
+
+type Eproc struct {
+	Paddr   uintptr
+	Sess    uintptr
+	Pcred   Pcred
+	Ucred   Ucred
+	Vm      Vmspace
+	Ppid    int32
+	Pgid    int32
+	Jobc    int16
+	Tdev    int32
+	Tpgid   int32
+	Tsess   uintptr
+	Wmesg   [8]int8
+	Xsize   int32
+	Xrssize int16
+	Xccount int16
+	Xswrss  int16
+	Flag    int32
+	Login   [12]int8
+	Spare   [4]int32
+	_       [4]byte
+}
+
+type ExternProc struct {
+	P_starttime Timeval
+	P_vmspace   *Vmspace
+	P_sigacts   uintptr
+	P_flag      int32
+	P_stat      int8
+	P_pid       int32
+	P_oppid     int32
+	P_dupfd     int32
+	User_stack  *int8
+	Exit_thread *byte
+	P_debugger  int32
+	Sigwait     int32
+	P_estcpu    uint32
+	P_cpticks   int32
+	P_pctcpu    uint32
+	P_wchan     *byte
+	P_wmesg     *int8
+	P_swtime    uint32
+	P_slptime   uint32
+	P_realtimer Itimerval
+	P_rtime     Timeval
+	P_uticks    uint64
+	P_sticks    uint64
+	P_iticks    uint64
+	P_traceflag int32
+	P_tracep    uintptr
+	P_siglist   int32
+	P_textvp    uintptr
+	P_holdcnt   int32
+	P_sigmask   uint32
+	P_sigignore uint32
+	P_sigcatch  uint32
+	P_priority  uint8
+	P_usrpri    uint8
+	P_nice      int8
+	P_comm      [17]int8
+	P_pgrp      uintptr
+	P_addr      uintptr
+	P_xstat     uint16
+	P_acflag    uint16
+	P_ru        *Rusage
+}
+
+type Itimerval struct {
+	Interval Timeval
+	Value    Timeval
+}
+
+type KinfoProc struct {
+	Proc  ExternProc
+	Eproc Eproc
+}
+
+type Vmspace struct {
+	Dummy  int32
+	Dummy2 *int8
+	Dummy3 [5]int32
+	Dummy4 [3]*int8
+}
+
+type Pcred struct {
+	Pc_lock  [72]int8
+	Pc_ucred uintptr
+	P_ruid   uint32
+	P_svuid  uint32
+	P_rgid   uint32
+	P_svgid  uint32
+	P_refcnt int32
+	_        [4]byte
+}
+
+type Ucred struct {
+	Ref     int32
+	Uid     uint32
+	Ngroups int16
+	Groups  [16]uint32
+}
@@ -535,3 +535,107 @@ type CtlInfo struct {
 	Id   uint32
 	Name [96]byte
 }
+
+const SizeofKinfoProc = 0x288
+
+type Eproc struct {
+	Paddr   uintptr
+	Sess    uintptr
+	Pcred   Pcred
+	Ucred   Ucred
+	Vm      Vmspace
+	Ppid    int32
+	Pgid    int32
+	Jobc    int16
+	Tdev    int32
+	Tpgid   int32
+	Tsess   uintptr
+	Wmesg   [8]int8
+	Xsize   int32
+	Xrssize int16
+	Xccount int16
+	Xswrss  int16
+	Flag    int32
+	Login   [12]int8
+	Spare   [4]int32
+	_       [4]byte
+}
+
+type ExternProc struct {
+	P_starttime Timeval
+	P_vmspace   *Vmspace
+	P_sigacts   uintptr
+	P_flag      int32
+	P_stat      int8
+	P_pid       int32
+	P_oppid     int32
+	P_dupfd     int32
+	User_stack  *int8
+	Exit_thread *byte
+	P_debugger  int32
+	Sigwait     int32
+	P_estcpu    uint32
+	P_cpticks   int32
+	P_pctcpu    uint32
+	P_wchan     *byte
+	P_wmesg     *int8
+	P_swtime    uint32
+	P_slptime   uint32
+	P_realtimer Itimerval
+	P_rtime     Timeval
+	P_uticks    uint64
+	P_sticks    uint64
+	P_iticks    uint64
+	P_traceflag int32
+	P_tracep    uintptr
+	P_siglist   int32
+	P_textvp    uintptr
+	P_holdcnt   int32
+	P_sigmask   uint32
+	P_sigignore uint32
+	P_sigcatch  uint32
+	P_priority  uint8
+	P_usrpri    uint8
+	P_nice      int8
+	P_comm      [17]int8
+	P_pgrp      uintptr
+	P_addr      uintptr
+	P_xstat     uint16
+	P_acflag    uint16
+	P_ru        *Rusage
+}
+
+type Itimerval struct {
+	Interval Timeval
+	Value    Timeval
+}
+
+type KinfoProc struct {
+	Proc  ExternProc
+	Eproc Eproc
+}
+
+type Vmspace struct {
+	Dummy  int32
+	Dummy2 *int8
+	Dummy3 [5]int32
+	Dummy4 [3]*int8
+}
+
+type Pcred struct {
+	Pc_lock  [72]int8
+	Pc_ucred uintptr
+	P_ruid   uint32
+	P_svuid  uint32
+	P_rgid   uint32
+	P_svgid  uint32
+	P_refcnt int32
+	_        [4]byte
+}
+
+type Ucred struct {
+	Ref     int32
+	Uid     uint32
+	Ngroups int16
+	Groups  [16]uint32
+}
@@ -431,6 +431,9 @@ type Winsize struct {
 const (
 	AT_FDCWD            = 0xfffafdcd
 	AT_SYMLINK_NOFOLLOW = 0x1
+	AT_REMOVEDIR        = 0x2
+	AT_EACCESS          = 0x4
+	AT_SYMLINK_FOLLOW   = 0x8
 )

 type PollFd struct {
@@ -672,9 +672,10 @@ type Winsize struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_REMOVEDIR        = 0x800
-	AT_SYMLINK_FOLLOW   = 0x400
+	AT_EACCESS          = 0x100
 	AT_SYMLINK_NOFOLLOW = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_REMOVEDIR        = 0x800
 )

 type PollFd struct {
@@ -675,9 +675,10 @@ type Winsize struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_REMOVEDIR        = 0x800
-	AT_SYMLINK_FOLLOW   = 0x400
+	AT_EACCESS          = 0x100
 	AT_SYMLINK_NOFOLLOW = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_REMOVEDIR        = 0x800
 )

 type PollFd struct {
@@ -656,9 +656,10 @@ type Winsize struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_REMOVEDIR        = 0x800
-	AT_SYMLINK_FOLLOW   = 0x400
+	AT_EACCESS          = 0x100
 	AT_SYMLINK_NOFOLLOW = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_REMOVEDIR        = 0x800
 )

 type PollFd struct {
@@ -653,9 +653,10 @@ type Winsize struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_REMOVEDIR        = 0x800
-	AT_SYMLINK_FOLLOW   = 0x400
+	AT_EACCESS          = 0x100
 	AT_SYMLINK_NOFOLLOW = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_REMOVEDIR        = 0x800
 )

 type PollFd struct {
@@ -445,8 +445,10 @@ type Ptmget struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_FOLLOW   = 0x400
+	AT_EACCESS          = 0x100
 	AT_SYMLINK_NOFOLLOW = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_REMOVEDIR        = 0x800
 )

 type PollFd struct {
@@ -453,8 +453,10 @@ type Ptmget struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_FOLLOW   = 0x400
+	AT_EACCESS          = 0x100
 	AT_SYMLINK_NOFOLLOW = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_REMOVEDIR        = 0x800
 )

 type PollFd struct {
@@ -450,8 +450,10 @@ type Ptmget struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_FOLLOW   = 0x400
+	AT_EACCESS          = 0x100
 	AT_SYMLINK_NOFOLLOW = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_REMOVEDIR        = 0x800
 )

 type PollFd struct {
@@ -453,8 +453,10 @@ type Ptmget struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_FOLLOW   = 0x400
+	AT_EACCESS          = 0x100
 	AT_SYMLINK_NOFOLLOW = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_REMOVEDIR        = 0x800
 )

 type PollFd struct {
@@ -438,8 +438,10 @@ type Winsize struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_FOLLOW   = 0x4
+	AT_EACCESS          = 0x1
 	AT_SYMLINK_NOFOLLOW = 0x2
+	AT_SYMLINK_FOLLOW   = 0x4
+	AT_REMOVEDIR        = 0x8
 )

 type PollFd struct {
@@ -438,8 +438,10 @@ type Winsize struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_FOLLOW   = 0x4
+	AT_EACCESS          = 0x1
 	AT_SYMLINK_NOFOLLOW = 0x2
+	AT_SYMLINK_FOLLOW   = 0x4
+	AT_REMOVEDIR        = 0x8
 )

 type PollFd struct {
@@ -439,8 +439,10 @@ type Winsize struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_FOLLOW   = 0x4
+	AT_EACCESS          = 0x1
 	AT_SYMLINK_NOFOLLOW = 0x2
+	AT_SYMLINK_FOLLOW   = 0x4
+	AT_REMOVEDIR        = 0x8
 )

 type PollFd struct {
@@ -432,8 +432,10 @@ type Winsize struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_FOLLOW   = 0x4
+	AT_EACCESS          = 0x1
 	AT_SYMLINK_NOFOLLOW = 0x2
+	AT_SYMLINK_FOLLOW   = 0x4
+	AT_REMOVEDIR        = 0x8
 )

 type PollFd struct {
@@ -432,8 +432,10 @@ type Winsize struct {

 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_FOLLOW   = 0x4
+	AT_EACCESS          = 0x1
 	AT_SYMLINK_NOFOLLOW = 0x2
+	AT_SYMLINK_FOLLOW   = 0x4
+	AT_REMOVEDIR        = 0x8
 )

 type PollFd struct {
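Each BSD hunk above is the same change: AT_EACCESS is added (plus AT_REMOVEDIR where it was missing) and the AT_* block is re-sorted by value. AT_EACCESS tells faccessat(2) to check permissions against the effective rather than the real user/group IDs, which matters for setuid tools. A short usage sketch; the path is arbitrary:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// With AT_EACCESS the kernel evaluates W_OK against the effective
	// UID/GID instead of the real ones (the default for access(2)).
	err := unix.Faccessat(unix.AT_FDCWD, "/etc/hosts", unix.W_OK, unix.AT_EACCESS)
	fmt.Printf("writable by effective IDs: %v\n", err == nil)
}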
@@ -83,7 +83,7 @@ github.com/containers/common/pkg/supplemented
 github.com/containers/common/pkg/timetype
 github.com/containers/common/pkg/umask
 github.com/containers/common/version
-# github.com/containers/image/v5 v5.13.2
+# github.com/containers/image/v5 v5.14.0
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
 github.com/containers/image/v5/directory/explicitfilepath
@@ -163,6 +163,9 @@ github.com/containers/storage/drivers/windows
 github.com/containers/storage/drivers/zfs
 github.com/containers/storage/pkg/archive
 github.com/containers/storage/pkg/chrootarchive
+github.com/containers/storage/pkg/chunked
+github.com/containers/storage/pkg/chunked/compressor
+github.com/containers/storage/pkg/chunked/internal
 github.com/containers/storage/pkg/config
 github.com/containers/storage/pkg/devicemapper
 github.com/containers/storage/pkg/directory
@@ -463,7 +466,7 @@ github.com/ulikunitz/xz/lzma
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v7 v7.0.2
+# github.com/vbauerster/mpb/v7 v7.0.3
 github.com/vbauerster/mpb/v7
 github.com/vbauerster/mpb/v7/cwriter
 github.com/vbauerster/mpb/v7/decor
@@ -511,7 +514,7 @@ golang.org/x/net/proxy
 golang.org/x/net/trace
 # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20210603125802-9665404d3644
+# golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
 golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/plan9