This commit is contained in:
github-actions[bot] 2025-10-07 22:02:18 +00:00
parent ca4526f3bb
commit 1e098da124
47 changed files with 536 additions and 140 deletions

10
go.mod
View File

@ -65,7 +65,7 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/disiqueira/gotree/v3 v3.0.2 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker-credential-helpers v0.9.3 // indirect
github.com/docker/docker-credential-helpers v0.9.4 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
@ -89,7 +89,7 @@ require (
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mattn/go-sqlite3 v1.14.32 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.1.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/go-archive v0.1.0 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
@ -137,3 +137,9 @@ require (
sigs.k8s.io/yaml v1.6.0 // indirect
tags.cncf.io/container-device-interface/specs-go v1.0.0 // indirect
)
replace go.podman.io/common => github.com/lsm5/container-libs/common v0.0.0-20251007215947-621025c4c891
replace go.podman.io/storage => github.com/lsm5/container-libs/storage v0.0.0-20251007215947-621025c4c891
replace go.podman.io/image/v5 => github.com/lsm5/container-libs/image/v5 v5.0.0-20251007215947-621025c4c891

28
go.sum
View File

@ -72,14 +72,14 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY=
github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v28.5.0+incompatible h1:crVqLrtKsrhC9c00ythRx435H8LiQnUKRtJLRR+Auxk=
github.com/docker/cli v28.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v28.5.0+incompatible h1:ZdSQoRUE9XxhFI/B8YLvhnEFMmYN9Pp8Egd2qcaFk1E=
github.com/docker/docker v28.5.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
@ -152,6 +152,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
github.com/lsm5/container-libs/common v0.0.0-20251007215947-621025c4c891 h1:s+C6r7gwu8W27uTv6GdOuOvZQhNh7QaSjplkpNzUvCc=
github.com/lsm5/container-libs/common v0.0.0-20251007215947-621025c4c891/go.mod h1:W6vnjTztyCQX8TIIuBII2jGVeRAET7RqCOlz8DiH8g8=
github.com/lsm5/container-libs/image/v5 v5.0.0-20251007215947-621025c4c891 h1:hY5gq2daF/iGc/Ydp3rqXHrA4OUOfPpXdqa4gC5q6z8=
github.com/lsm5/container-libs/image/v5 v5.0.0-20251007215947-621025c4c891/go.mod h1:XVw9N/RHN7HZQH6fHn7gENnrejqqqYG6OtnZVCW0k48=
github.com/lsm5/container-libs/storage v0.0.0-20251007215947-621025c4c891 h1:W6yXFU0+fVn9HHHoj/t8LoaM8/VJm4zKgKCvhyrqeC8=
github.com/lsm5/container-libs/storage v0.0.0-20251007215947-621025c4c891/go.mod h1:mz7wN7mBeQUxh/iYq9uoROZFO9bUuq85BLqQCdwbibI=
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
@ -162,8 +168,8 @@ github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuE
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mistifyio/go-zfs/v3 v3.1.0 h1:FZaylcg0hjUp27i23VcJJQiuBeAZjrC8lPqCGM1CopY=
github.com/mistifyio/go-zfs/v3 v3.1.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/moby/buildkit v0.25.0 h1:cRgh74ymzyHxS5a/lsYT4OCyVU8iC3UgkwasIEUi0og=
github.com/moby/buildkit v0.25.0/go.mod h1:phM8sdqnvgK2y1dPDnbwI6veUCXHOZ6KFSl6E164tkc=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@ -195,8 +201,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw=
github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE=
github.com/onsi/ginkgo/v2 v2.26.0 h1:1J4Wut1IlYZNEAWIV3ALrT9NfiaGW2cDCJQSFQMs/gE=
github.com/onsi/ginkgo/v2 v2.26.0/go.mod h1:qhEywmzWTBUY88kfO0BRvX4py7scov9yR+Az2oavUzw=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/opencontainers/cgroups v0.0.5 h1:DRITAqcOnY0uSBzIpt1RYWLjh5DPDiqUs4fY6Y0ktls=
@ -317,12 +323,6 @@ go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKr
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.podman.io/common v0.65.1-0.20250916163606-92222dcd3da4 h1:YjBqTOxz4cqfpifcd71VoBl1FTQL2U2La5NgMqmRRqU=
go.podman.io/common v0.65.1-0.20250916163606-92222dcd3da4/go.mod h1:DyOdwtkwzYA8lE0TueJnxRju4Lmsrx6ZAC/ATAkYYck=
go.podman.io/image/v5 v5.37.1-0.20250916163606-92222dcd3da4 h1:hfc3lZaxi6KGnWN3IusIaCkcMPR4rTR+vWZzakeD1EA=
go.podman.io/image/v5 v5.37.1-0.20250916163606-92222dcd3da4/go.mod h1:cGWb3IyBziJGxhFikTOlt9Ap+zo6s3rz9Qd1rbzqs4s=
go.podman.io/storage v1.60.1-0.20250916163606-92222dcd3da4 h1:jo0PSKh6muU7rmhXXqOV9aK+HrA8koqs47KhBsZf6LY=
go.podman.io/storage v1.60.1-0.20250916163606-92222dcd3da4/go.mod h1:AeZXAN8Qu1gTlAEHIc6mVhxk+61oMSM3K3iLx5UAQWE=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=

View File

@ -2,6 +2,7 @@ package zfs
import (
"bytes"
"context"
"errors"
"fmt"
"io"
@ -10,10 +11,37 @@ import (
"runtime"
"strconv"
"strings"
"sync/atomic"
"syscall"
"time"
"github.com/google/uuid"
)
// Runner specifies the parameters used when executing ZFS commands.
type Runner struct {
// Timeout specifies how long to wait before sending a SIGTERM signal to the running process.
Timeout time.Duration
// Grace specifies the time waited after signaling the running process with SIGTERM before it is forcefully
// killed with SIGKILL.
Grace time.Duration
}
var defaultRunner atomic.Value
func init() {
defaultRunner.Store(&Runner{})
}
func Default() *Runner {
return defaultRunner.Load().(*Runner) //nolint: forcetypeassert // Impossible for it to be anything else.
}
func SetRunner(runner *Runner) {
defaultRunner.Store(runner)
}
type command struct {
Command string
Stdin io.Reader
@ -21,7 +49,19 @@ type command struct {
}
func (c *command) Run(arg ...string) ([][]string, error) {
cmd := exec.Command(c.Command, arg...)
var cmd *exec.Cmd
if Default().Timeout == 0 {
cmd = exec.Command(c.Command, arg...)
} else {
ctx, cancel := context.WithTimeout(context.Background(), Default().Timeout)
defer cancel()
cmd = exec.CommandContext(ctx, c.Command, arg...)
cmd.Cancel = func() error {
return cmd.Process.Signal(syscall.SIGTERM)
}
cmd.WaitDelay = Default().Grace
}
var stdout, stderr bytes.Buffer

View File

@ -707,6 +707,13 @@ type Destination struct {
// Identity file with ssh key, optional
Identity string `json:",omitempty" toml:"identity,omitempty"`
// Path to TLS client certificate PEM file, optional
TLSCert string `json:",omitempty" toml:"tls_cert,omitempty"`
// Path to TLS client certificate private key PEM file, optional
TLSKey string `json:",omitempty" toml:"tls_key,omitempty"`
// Path to TLS certificate authority PEM file, optional
TLSCA string `json:",omitempty" toml:"tls_ca,omitempty"`
// isMachine describes if the remote destination is a machine.
IsMachine bool `json:",omitempty" toml:"is_machine,omitempty"`
}

View File

@ -779,10 +779,17 @@ default_sysctls = [
# rootful "unix:///run/podman/podman.sock" (Default)
# remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock
# remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock
# tcp/tls remote tcp://10.10.1.136:9443
#
# uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock"
# Path to file containing ssh identity key
# identity = "~/.ssh/id_rsa"
# Path to PEM file containing TLS client certificate
# tls_cert = "/path/to/certs/podman/tls.crt"
# Path to PEM file containing TLS client certificate private key
# tls_key = "/path/to/certs/podman/tls.key"
# Path to PEM file containing TLS certificate authority (CA) bundle
# tls_ca = "/path/to/certs/podman/ca.crt"
# Directory for temporary files. Must be tmpfs (wiped after reboot)
#

View File

@ -598,10 +598,17 @@ default_sysctls = [
# rootful "unix:///run/podman/podman.sock" (Default)
# remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock
# remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock
# tcp/tls remote tcp://10.10.1.136:9443
#
# uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock"
# Path to file containing ssh identity key
# identity = "~/.ssh/id_rsa"
# Path to PEM file containing TLS client certificate
# tls_cert = "/path/to/certs/podman/tls.crt"
# Path to PEM file containing TLS client certificate private key
# tls_key = "/path/to/certs/podman/tls.key"
# Path to PEM file containing TLS certificate authority (CA) bundle
# tls_ca = "/path/to/certs/podman/ca.crt"
# Directory for temporary files. Must be tmpfs (wiped after reboot)
#

View File

@ -28,6 +28,7 @@ import (
"go.podman.io/image/v5/transports"
"go.podman.io/image/v5/types"
chunkedToc "go.podman.io/storage/pkg/chunked/toc"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@ -977,7 +978,15 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo
}
// computeDiffID reads all input from stream, uncompresses it using decompressor
// if necessary, and returns its digest.
// It delegates to computeDiffIDWithAlgorithm, selecting the globally configured
// digest algorithm for new objects.
func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
	return computeDiffIDWithAlgorithm(stream, decompressor, supportedDigests.TmpDigestForNewObjects())
}
// computeDiffIDWithAlgorithm reads all input from layerStream, uncompresses it using decompressor if necessary,
// and returns its digest using the specified algorithm.
func computeDiffIDWithAlgorithm(stream io.Reader, decompressor compressiontypes.DecompressorFunc, algorithm digest.Algorithm) (digest.Digest, error) {
if decompressor != nil {
s, err := decompressor(stream)
if err != nil {
@ -987,7 +996,7 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
stream = s
}
return digest.Canonical.FromReader(stream)
return algorithm.FromReader(stream)
}
// algorithmsByNames returns slice of Algorithms from a sequence of Algorithm Names

View File

@ -151,7 +151,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
}
}()
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {

View File

@ -178,7 +178,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err)
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, inputInfo)
sizeCounter := &sizeCounter{}
stream = io.TeeReader(stream, sizeCounter)

View File

@ -9,6 +9,7 @@ import (
"go.podman.io/image/v5/docker/reference"
"go.podman.io/image/v5/manifest"
"go.podman.io/image/v5/types"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)
type manifestSchema1 struct {
@ -160,6 +161,13 @@ func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, o
//
// Based on github.com/docker/docker/distribution/pull_v2.go
func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) {
// Explicitly reject SHA512+Schema1 combinations as they are not supported
// Schema1 is deprecated and Docker/registry don't support SHA512+Schema1
configuredAlgorithm := supportedDigests.TmpDigestForNewObjects()
if configuredAlgorithm == digest.SHA512 {
return nil, fmt.Errorf("SHA512+Schema1 is not supported: Schema1 is deprecated and Docker/registry do not support SHA512 with Schema1 manifests. Please use SHA256 or convert to Schema2/OCI format")
}
uploadedLayerInfos := options.InformationOnly.LayerInfos
layerDiffIDs := options.InformationOnly.LayerDiffIDs
@ -219,7 +227,7 @@ func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *t
configDescriptor := manifest.Schema2Descriptor{
MediaType: manifest.DockerV2Schema2ConfigMediaType,
Size: int64(len(configJSON)),
Digest: digest.FromBytes(configJSON),
Digest: supportedDigests.TmpDigestForNewObjects().FromBytes(configJSON),
}
if options.LayerInfos != nil {

View File

@ -17,6 +17,7 @@ import (
"go.podman.io/image/v5/internal/iolimits"
"go.podman.io/image/v5/manifest"
"go.podman.io/image/v5/pkg/blobinfocache/none"
"go.podman.io/image/v5/pkg/digestvalidation"
"go.podman.io/image/v5/types"
)
@ -110,9 +111,11 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if err != nil {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.ConfigDescriptor.Digest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
expectedDigest := m.m.ConfigDescriptor.Digest
// Validate the blob against the expected digest using centralized validation
if err := digestvalidation.ValidateBlobAgainstDigest(blob, expectedDigest); err != nil {
return nil, fmt.Errorf("config descriptor validation failed: %w", err)
}
m.configBlob = blob
}

View File

@ -8,7 +8,6 @@ import (
"slices"
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"go.podman.io/image/v5/docker/reference"
"go.podman.io/image/v5/internal/iolimits"
@ -74,9 +73,12 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if err != nil {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.Config.Digest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
// Use the same algorithm as the expected digest
expectedDigest := m.m.Config.Digest
algorithm := expectedDigest.Algorithm()
computedDigest := algorithm.FromBytes(blob)
if computedDigest != expectedDigest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, expectedDigest)
}
m.configBlob = blob
}

View File

@ -8,6 +8,7 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
compressiontypes "go.podman.io/image/v5/pkg/compression/types"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)
// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
@ -123,7 +124,7 @@ func Digest(manifest []byte) (digest.Digest, error) {
}
}
return digest.FromBytes(manifest), nil
return supportedDigests.TmpDigestForNewObjects().FromBytes(manifest), nil
}
// MatchesDigest returns true iff the manifest matches expectedDigest.

View File

@ -5,6 +5,7 @@ import (
"github.com/opencontainers/go-digest"
"go.podman.io/image/v5/types"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)
// Digester computes a digest of the provided stream, if not known yet.
@ -13,7 +14,7 @@ type Digester struct {
digester digest.Digester // Or nil
}
// newDigester initiates computation of a digest.Canonical digest of stream,
// newDigester initiates computation of a digest (using the configured algorithm) of stream,
// if !validDigest; otherwise it just records knownDigest to be returned later.
// The caller MUST use the returned stream instead of the original value.
func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) {
@ -21,7 +22,7 @@ func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool)
return Digester{knownDigest: knownDigest}, stream
} else {
res := Digester{
digester: digest.Canonical.Digester(),
digester: supportedDigests.TmpDigestForNewObjects().Digester(),
}
stream = io.TeeReader(stream, res.digester.Hash())
return res, stream
@ -37,13 +38,14 @@ func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Re
return newDigester(stream, d, d != "")
}
// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream,
// if a digest.Canonical digest is not supplied in the provided blobInfo;
// DigestIfConfiguredUnknown initiates computation of a digest (using the configured algorithm) of stream,
// if a digest with the configured algorithm is not supplied in the provided blobInfo;
// otherwise blobInfo.Digest will be used.
// The caller MUST use the returned stream instead of the original value.
func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
func DigestIfConfiguredUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
	known := blobInfo.Digest
	// Only trust a supplied digest when it uses the configured algorithm;
	// otherwise a fresh digest of the stream must be computed.
	haveConfigured := known != "" && known.Algorithm() == supportedDigests.TmpDigestForNewObjects()
	return newDigester(stream, known, haveConfigured)
}
// Digest() returns a digest value possibly computed by Digester.

View File

@ -23,7 +23,7 @@ func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *type
diskBlob.Close()
os.Remove(diskBlob.Name())
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo)
digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, *inputInfo)
written, err := io.Copy(diskBlob, stream)
if err != nil {
cleanup()

View File

@ -134,7 +134,7 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
}
}()
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {

View File

@ -21,6 +21,7 @@ import (
"go.podman.io/image/v5/types"
"go.podman.io/storage/pkg/archive"
"go.podman.io/storage/pkg/ioutils"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)
type blobCacheDestination struct {
@ -92,7 +93,7 @@ func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader i
}
}()
digester := digest.Canonical.Digester()
digester := supportedDigests.TmpDigestForNewObjects().Digester()
if err := func() error { // A scope for defer
defer tempFile.Close()

View File

@ -0,0 +1,50 @@
package digestvalidation
import (
"fmt"
"github.com/opencontainers/go-digest"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)
// ValidateBlobAgainstDigest validates that the provided blob matches the expected digest.
// It performs comprehensive validation to prevent panics from malformed digests or unsupported algorithms.
//
// This function handles the following validation steps:
//  1. Empty digest check
//  2. Digest format validation using digest.Parse()
//  3. Algorithm validation
//  4. Algorithm support validation using supported-digests package
//  5. Content validation by computing and comparing digests
//
// Returns an error if any validation step fails, with specific error messages for different failure cases.
func ValidateBlobAgainstDigest(blob []byte, expectedDigest digest.Digest) error {
	// An empty digest can never match any content.
	if expectedDigest == "" {
		return fmt.Errorf("expected digest is empty")
	}

	// Parse the digest to validate its format before calling Algorithm().
	// Wrap the parse error with %w so callers can inspect the underlying cause.
	parsedDigest, err := digest.Parse(expectedDigest.String())
	if err != nil {
		return fmt.Errorf("invalid digest format %q: %w", expectedDigest, err)
	}

	algorithm := parsedDigest.Algorithm()
	if algorithm == "" {
		return fmt.Errorf("invalid digest algorithm: %s", expectedDigest)
	}

	// Validate that the algorithm is supported to prevent panics from FromBytes.
	if !supportedDigests.IsSupportedDigestAlgorithm(algorithm) {
		return fmt.Errorf("unsupported digest algorithm: %s (supported: %v)", algorithm, supportedDigests.GetSupportedDigestAlgorithms())
	}

	// Compute the actual digest of the blob and compare it with the expectation.
	computedDigest := algorithm.FromBytes(blob)
	if computedDigest != expectedDigest {
		return fmt.Errorf("blob digest mismatch: expected %s, got %s", expectedDigest, computedDigest)
	}

	return nil
}

View File

@ -37,6 +37,7 @@ import (
"go.podman.io/storage/pkg/chunked"
"go.podman.io/storage/pkg/chunked/toc"
"go.podman.io/storage/pkg/ioutils"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)
var (
@ -289,7 +290,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
}
defer decompressed.Close()
diffID := digest.Canonical.Digester()
diffID := supportedDigests.TmpDigestForNewObjects().Digester()
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
_, err = io.Copy(diffID.Hash(), decompressed)
@ -1033,11 +1034,19 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
return false, err
}
} else if trusted.diffID != untrustedDiffID {
return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID)
// If the algorithms don't match, we need to handle this carefully
if trusted.diffID.Algorithm() != untrustedDiffID.Algorithm() {
// This is a critical security check - we cannot allow algorithm mismatches
// without proper validation. For now, we'll reject the layer to maintain security.
return false, fmt.Errorf("layer %d diffID algorithm mismatch: trusted=%s, config=%s - this indicates a potential security issue",
index, trusted.diffID.Algorithm(), untrustedDiffID.Algorithm())
} else {
return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID)
}
}
}
id := layerID(parentLayer, trusted)
id := layerID(parentLayer, trusted, supportedDigests.TmpDigestForNewObjects())
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
@ -1056,8 +1065,8 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
return false, nil
}
// layerID computes a layer (“chain”) ID for (a possibly-empty parentID, trusted)
func layerID(parentID string, trusted trustedLayerIdentityData) string {
// layerID computes a layer ("chain") ID for (a possibly-empty parentID, trusted)
func layerID(parentID string, trusted trustedLayerIdentityData, algorithm digest.Algorithm) string {
var component string
mustHash := false
if trusted.layerIdentifiedByTOC {
@ -1072,7 +1081,7 @@ func layerID(parentID string, trusted trustedLayerIdentityData) string {
if parentID == "" && !mustHash {
return component
}
return digest.Canonical.FromString(parentID + "+" + component).Encoded()
return algorithm.FromString(parentID + "+" + component).Encoded()
}
// createNewLayer creates a new layer newLayerID for (index, trusted) on top of parentLayer (which may be "").
@ -1490,16 +1499,15 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
Key: s.lockProtected.configDigest.String(),
Data: v,
Digest: digest.Canonical.FromBytes(v),
Digest: supportedDigests.TmpDigestForNewObjects().FromBytes(v),
})
}
// Set up to save the options.UnparsedToplevel's manifest if it differs from
// the per-platform one, which is saved below.
if !bytes.Equal(toplevelManifest, s.manifest) {
manifestDigest, err := manifest.Digest(toplevelManifest)
if err != nil {
return fmt.Errorf("digesting top-level manifest: %w", err)
}
// Use the configured digest algorithm for manifest digest
algorithm := supportedDigests.TmpDigestForNewObjects()
manifestDigest := algorithm.FromBytes(toplevelManifest)
key, err := manifestBigDataKey(manifestDigest)
if err != nil {
return err
@ -1532,7 +1540,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
Key: "signatures",
Data: s.signatures,
Digest: digest.Canonical.FromBytes(s.signatures),
Digest: supportedDigests.TmpDigestForNewObjects().FromBytes(s.signatures),
})
}
for instanceDigest, signatures := range s.signatureses {
@ -1543,7 +1551,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
Key: key,
Data: signatures,
Digest: digest.Canonical.FromBytes(signatures),
Digest: supportedDigests.TmpDigestForNewObjects().FromBytes(signatures),
})
}
@ -1586,8 +1594,13 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
// sizes (tracked in the metadata) which might have already
// been present with new values, when ideally we'd find a way
// to merge them since they all apply to the same image
// Create a digest function that uses the configured algorithm and handles schema1 manifests properly
digestFunc := func(data []byte) (digest.Digest, error) {
// Use manifest.Digest to handle schema1 signature stripping properly
return manifest.Digest(data)
}
for _, data := range imgOptions.BigData {
if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil {
if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, digestFunc); err != nil {
logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err)
return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err)
}
@ -1645,10 +1658,11 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
// PutManifest writes the manifest to the destination.
func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
digest, err := manifest.Digest(manifestBlob)
if err != nil {
return err
}
// Use the configured digest algorithm for manifest digest
algorithm := supportedDigests.TmpDigestForNewObjects()
logrus.Debugf("PutManifest: Computing manifest digest using algorithm: %s", algorithm.String())
digest := algorithm.FromBytes(manifestBlob)
logrus.Debugf("PutManifest: Computed manifest digest: %s", digest.String())
s.manifest = bytes.Clone(manifestBlob)
if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil
s.manifest = []byte{}

View File

@ -35,7 +35,7 @@ func newReference(transport storageTransport, named reference.Named, id string)
return nil, fmt.Errorf("reference %s has neither a tag nor a digest: %w", named.String(), ErrInvalidReference)
}
if id != "" {
if err := validateImageID(id); err != nil {
if err := ValidateImageID(id); err != nil {
return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err.Error(), ErrInvalidReference)
}
}

View File

@ -15,6 +15,7 @@ import (
"go.podman.io/image/v5/types"
"go.podman.io/storage"
"go.podman.io/storage/pkg/idtools"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)
const (
@ -156,7 +157,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
// If it looks like a digest, leave it alone for now.
if _, err := digest.Parse(possibleID); err != nil {
// Otherwise…
if err := validateImageID(possibleID); err == nil {
if err := ValidateImageID(possibleID); err == nil {
id = possibleID // … it is a full ID
} else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) {
// … it is a truncated version of the ID of an image that's present in local storage,
@ -385,7 +386,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
switch len(fields) {
case 1: // name only
case 2: // name:tag@ID or name[:tag]@digest
if idErr := validateImageID(fields[1]); idErr != nil {
if idErr := ValidateImageID(fields[1]); idErr != nil {
if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
}
@ -394,7 +395,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
if _, err := digest.Parse(fields[1]); err != nil {
return err
}
if err := validateImageID(fields[2]); err != nil {
if err := ValidateImageID(fields[2]); err != nil {
return err
}
default: // Coverage: This should never happen
@ -407,8 +408,34 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
return nil
}
// validateImageID returns nil if id is a valid (full) image ID, or an error
func validateImageID(id string) error {
_, err := digest.Parse("sha256:" + id)
return err
// ValidateImageID returns nil if id is a valid (full) image ID, or an error.
func ValidateImageID(id string) error {
	algorithms := supportedDigests.GetSupportedDigestAlgorithms()

	// Match the ID against the algorithm whose encoded digest has this length,
	// then let digest.Parse perform the actual character-level validation.
	for _, alg := range algorithms {
		length, known := supportedDigests.GetDigestAlgorithmExpectedLength(alg)
		if !known {
			// Skip algorithms we don't know how to handle yet.
			continue
		}
		if len(id) == length {
			_, err := digest.Parse(alg.String() + ":" + id)
			return err
		}
	}

	// No algorithm matched the length; report every acceptable length.
	lengths := make([]string, 0, len(algorithms))
	for _, alg := range algorithms {
		if length, known := supportedDigests.GetDigestAlgorithmExpectedLength(alg); known {
			name := supportedDigests.GetDigestAlgorithmName(alg)
			lengths = append(lengths, fmt.Sprintf("%d (%s)", length, name))
		}
	}
	return fmt.Errorf("invalid image ID length: expected %s characters, got %d",
		strings.Join(lengths, " or "), len(id))
}

View File

@ -151,7 +151,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) {
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) (int64, error) {
driver := gdw.ProtoDriver
if options.Mappings == nil {
@ -164,7 +164,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
}
layerFs, err := driver.Get(id, mountOpts)
if err != nil {
return
return -1, err
}
defer driverPut(driver, id, &err)
@ -185,19 +185,20 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
}
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil {
size, err := ApplyUncompressedLayer(layerFs, options.Diff, tarOptions)
if err != nil {
logrus.Errorf("While applying layer: %s", err)
return
return -1, err
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
return
return size, nil
}
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (int64, error) {
driver := gdw.ProtoDriver
if idMappings == nil {
@ -209,7 +210,7 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings,
changes, err := gdw.Changes(id, idMappings, parent, parentMappings, mountLabel)
if err != nil {
return
return 0, err
}
options := MountOpts{
@ -217,7 +218,7 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings,
}
layerFs, err := driver.Get(id, options)
if err != nil {
return
return 0, err
}
defer driverPut(driver, id, &err)

View File

@ -10,6 +10,7 @@ import (
"github.com/sirupsen/logrus"
"go.podman.io/storage/internal/staging_lockfile"
"go.podman.io/storage/pkg/system"
)
/*
@ -148,7 +149,7 @@ func RecoverStaleDirs(rootDir string) error {
continue
}
if rmErr := os.RemoveAll(tempDirPath); rmErr != nil {
if rmErr := system.EnsureRemoveAll(tempDirPath); rmErr != nil {
recoveryErrors = append(recoveryErrors, fmt.Errorf("error removing stale temp dir: %w", rmErr))
}
if unlockErr := instanceLock.UnlockAndDelete(); unlockErr != nil {
@ -218,7 +219,7 @@ func (td *TempDir) Cleanup() error {
return nil
}
if err := os.RemoveAll(td.tempDirPath); err != nil {
if err := system.EnsureRemoveAll(td.tempDirPath); err != nil {
return fmt.Errorf("removing temp dir failed: %w", err)
}

View File

@ -2694,5 +2694,5 @@ func closeAll(closes ...func() error) (rErr error) {
rErr = fmt.Errorf("%v: %w", err, rErr)
}
}
return
return rErr
}

View File

@ -417,9 +417,7 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err)
}
hdr.Name = name
if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
return nil, err
}
setHeaderForSpecialDevice(hdr, name, fi.Sys())
return hdr, nil
}

View File

@ -30,7 +30,7 @@ type overlayWhiteoutConverter struct {
rolayers []string
}
func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (*tar.Header, error) {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
@ -73,7 +73,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
// add a whiteout for this item in this layer.
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
wo = &tar.Header{
wo := &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
@ -85,7 +85,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
break
return wo, nil
}
for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) {
// Check for whiteout for a parent directory in a parent layer.
@ -109,7 +109,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
}
}
return
return nil, nil
}
func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path string, handler TarWhiteoutHandler) (bool, error) {

View File

@ -69,7 +69,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err error) {
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) {
s, ok := stat.(*syscall.Stat_t)
if ok {
@ -82,8 +82,6 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err erro
hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert,nolintlint
}
}
return
}
func getInodeFromStat(stat any) (inode uint64) {
@ -93,7 +91,7 @@ func getInodeFromStat(stat any) (inode uint64) {
inode = s.Ino
}
return
return inode
}
func getFileUIDGID(stat any) (idtools.IDPair, error) {

View File

@ -52,14 +52,13 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return noPermPart | permPart
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) {
// do nothing. no notion of Rdev, Nlink in stat on Windows
return
}
func getInodeFromStat(stat interface{}) (inode uint64) {
func getInodeFromStat(stat interface{}) uint64 {
// do nothing. no notion of Inode in stat on Windows
return
return 0
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by

View File

@ -86,12 +86,12 @@ func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
func aufsMetadataSkip(path string) (bool, error) {
skip, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
return skip, err
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {

View File

@ -20,8 +20,8 @@ func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode().IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
func getIno(fi os.FileInfo) uint64 {
return 0
}
func hasHardlinks(fi os.FileInfo) bool {

View File

@ -93,13 +93,13 @@ func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
func TarResourceRebase(sourcePath, rebaseName string) (io.ReadCloser, error) {
sourcePath = normalizePath(sourcePath)
if err = fileutils.Lexists(sourcePath); err != nil {
if err := fileutils.Lexists(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return
return nil, err
}
// Separate the source path between its directory and
@ -411,7 +411,7 @@ func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseNa
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
return "", "", err
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
@ -422,7 +422,7 @@ func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseNa
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
return "", "", err
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.

View File

@ -10,7 +10,7 @@ func timeToTimespec(time time.Time) (ts syscall.Timespec) {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
return ts
}
return syscall.NsecToTimespec(time.UnixNano())
}

View File

@ -45,8 +45,8 @@ func Generate(input ...string) (io.Reader, error) {
return buf, nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
func parseStringPairs(input ...string) [][2]string {
output := make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
@ -55,5 +55,5 @@ func parseStringPairs(input ...string) (output [][2]string) {
}
output = append(output, pair)
}
return
return output
}

View File

@ -150,7 +150,7 @@ func timeToTimespec(time *time.Time) (ts unix.Timespec) {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
return ts
}
return unix.NsecToTimespec(time.UnixNano())
}

View File

@ -1277,7 +1277,7 @@ func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) {
retErr = soe.err
}
}
return
return retErr
}
// getBlobAtConverterGoroutine reads from the streams and errs channels, then sends

View File

@ -19,10 +19,10 @@ func Size(dir string) (size int64, err error) {
}
// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
func Usage(dir string) (usage *DiskUsage, err error) {
usage = &DiskUsage{}
func Usage(dir string) (*DiskUsage, error) {
usage := &DiskUsage{}
data := make(map[uint64]struct{})
err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error {
err := filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error {
if err != nil {
// if dir does not exist, Usage() returns the error.
// if dir/x disappeared while walking, Usage() ignores dir/x.
@ -58,5 +58,5 @@ func Usage(dir string) (usage *DiskUsage, err error) {
})
// inode count is the number of unique inode numbers we saw
usage.InodeCount = int64(len(data))
return
return usage, err
}

View File

@ -18,9 +18,9 @@ func Size(dir string) (size int64, err error) {
}
// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
func Usage(dir string) (usage *DiskUsage, err error) {
usage = &DiskUsage{}
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
func Usage(dir string) (*DiskUsage, error) {
usage := &DiskUsage{}
err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
// if dir does not exist, Size() returns the error.
// if dir/x disappeared while walking, Size() ignores dir/x.
@ -48,5 +48,5 @@ func Usage(dir string) (usage *DiskUsage, err error) {
return nil
})
return
return usage, err
}

View File

@ -121,7 +121,8 @@ func (bp *BytesPipe) Close() error {
// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
func (bp *BytesPipe) Read(p []byte) (int, error) {
var n int
bp.mu.Lock()
if bp.bufLen == 0 {
if bp.closeErr != nil {
@ -158,7 +159,7 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) {
bp.wait.Broadcast()
bp.mu.Unlock()
return
return n, nil
}
func returnBuffer(b *fixedBuffer) {

View File

@ -83,7 +83,7 @@ func (r *OnEOFReader) Read(p []byte) (n int, err error) {
if err == io.EOF {
r.runFunc()
}
return
return n, err
}
// Close closes the file and run the function.

View File

@ -59,8 +59,8 @@ func NewWriteCounter(w io.Writer) *WriteCounter {
}
}
func (wc *WriteCounter) Write(p []byte) (count int, err error) {
count, err = wc.Writer.Write(p)
func (wc *WriteCounter) Write(p []byte) (int, error) {
count, err := wc.Writer.Write(p)
wc.Count += int64(count)
return
return count, err
}

View File

@ -59,11 +59,11 @@ func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
}
// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
func Copy(dst io.Writer, src io.Reader) (int64, error) {
buf := BufioReader32KPool.Get(src)
written, err = io.Copy(dst, buf)
written, err := io.Copy(dst, buf)
BufioReader32KPool.Put(buf)
return
return written, err
}
// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back

View File

@ -0,0 +1,209 @@
package supporteddigests
// Package supporteddigests provides digest algorithm management for container tools.
//
// WARNING: This package is currently Work In Progress (WIP) and is ONLY intended
// for use within Podman, Buildah, and Skopeo. It should NOT be used by external
// applications or libraries, even if shipped in a stable release. The API may
// change without notice and is not considered stable for external consumption.
// Proceed with caution if you must use this package outside of the intended scope.
import (
"fmt"
"strings"
"sync"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
var (
digestAlgorithm = digest.Canonical // Default to SHA256
algorithmMutex sync.RWMutex // Protects digestAlgorithm from concurrent access
)
// TmpDigestForNewObjects returns the digest algorithm currently configured
// for computing digests of newly created objects (image layers, manifests,
// blobs).
//
// WARNING: This function is part of a WIP package intended only for Podman,
// Buildah, and Skopeo. Do not use in external applications.
//
// The returned value is the process-global setting established by
// TmpSetDigestForNewObjects, or digest.Canonical (SHA256) if it was never
// set. The read is guarded by an RWMutex, so this is safe to call
// concurrently from multiple goroutines; it never modifies global state, and
// algorithm changes never affect digests that were already computed.
func TmpDigestForNewObjects() digest.Algorithm {
	algorithmMutex.RLock()
	current := digestAlgorithm
	algorithmMutex.RUnlock()
	return current
}
// TmpSetDigestForNewObjects sets the digest algorithm that will be used for
// computing digests of new objects (e.g., image layers, manifests, blobs).
//
// WARNING: This function is part of a WIP package intended only for Podman,
// Buildah, and Skopeo. Do not use in external applications.
//
// Supported values are digest.SHA256, digest.SHA512, or the empty string,
// which resets the setting to the default (SHA256). Any other value is
// rejected with an error and the current setting is left unchanged. The
// write is performed under a mutex, so concurrent callers are safe, and the
// new value affects all subsequent calls to TmpDigestForNewObjects for the
// lifetime of the process. Typically used to honor an optional --digest
// flag, e.g. "podman|buildah build --digest sha512". Existing digest values
// are not affected by algorithm changes.
func TmpSetDigestForNewObjects(algorithm digest.Algorithm) error {
	algorithmMutex.Lock()
	defer algorithmMutex.Unlock()

	// Validate the requested algorithm; unsupported values are rejected
	// without touching the current setting.
	switch algorithm {
	case digest.SHA256, digest.SHA512:
		// Log prefix matches the function name (the old "SetDigestAlgorithm:"
		// prefix was a leftover from a previous name and misled tracing).
		logrus.Debugf("TmpSetDigestForNewObjects: Setting digest algorithm to %s", algorithm.String())
		digestAlgorithm = algorithm
		return nil
	case "":
		// Empty string means "reset to the default" (SHA256).
		logrus.Debugf("TmpSetDigestForNewObjects: Setting digest algorithm to default %s", digest.Canonical.String())
		digestAlgorithm = digest.Canonical
		return nil
	default:
		return fmt.Errorf("unsupported digest algorithm: %q", algorithm)
	}
}
// IsSupportedDigestAlgorithm reports whether algorithm may be passed to
// TmpSetDigestForNewObjects.
//
// WARNING: This function is part of a WIP package intended only for Podman,
// Buildah, and Skopeo. Do not use in external applications.
//
// It returns true for the explicitly supported algorithms (SHA256, SHA512)
// and for the empty string, which is treated as a request for the default
// (SHA256). Anything else — SHA384, MD5, etc. — returns false.
//
// This is a pure function with no side effects and is thread-safe for
// concurrent calls from multiple goroutines.
func IsSupportedDigestAlgorithm(algorithm digest.Algorithm) bool {
	// Empty string is accepted and treated as the default (SHA256).
	// digest.Canonical needs no special case: go-digest defines it as an
	// alias for digest.SHA256, so the list check below already covers it.
	if algorithm == "" {
		return true
	}
	for _, supported := range GetSupportedDigestAlgorithms() {
		if algorithm == supported {
			return true
		}
	}
	return false
}
// GetSupportedDigestAlgorithms returns all digest algorithms that can be
// used with TmpSetDigestForNewObjects — currently SHA256 and SHA512.
//
// WARNING: This function is part of a WIP package intended only for Podman,
// Buildah, and Skopeo. Do not use in external applications.
//
// A fresh slice is returned on every call; callers should nevertheless treat
// it as read-only. This is a pure function with no side effects and is
// thread-safe for concurrent calls from multiple goroutines. It is typically
// used for validation and algorithm enumeration.
func GetSupportedDigestAlgorithms() []digest.Algorithm {
	return []digest.Algorithm{digest.SHA256, digest.SHA512}
}
// GetDigestAlgorithmName returns a standardized, human-readable uppercase
// name for the algorithm, for logging and user-facing display.
//
// WARNING: This function is part of a WIP package intended only for Podman,
// Buildah, and Skopeo. Do not use in external applications.
//
// Matching is case-insensitive: "sha256", "SHA256", and "Sha256" all return
// "SHA256". Unsupported algorithms return "unknown". digest.Canonical is an
// alias for digest.SHA256 in go-digest (its String() is "sha256"), so it
// reports "SHA256" like any other spelling of SHA256 — the previous special
// "SHA256 (canonical)" branch was unreachable and has been removed.
//
// This is a pure function with no side effects and is thread-safe for
// concurrent calls from multiple goroutines.
func GetDigestAlgorithmName(algorithm digest.Algorithm) string {
	// Normalize to lowercase for case-insensitive matching.
	switch strings.ToLower(algorithm.String()) {
	case "sha256":
		return "SHA256"
	case "sha512":
		return "SHA512"
	default:
		return "unknown"
	}
}
// GetDigestAlgorithmExpectedLength returns the number of hex characters in a
// digest string produced by the given algorithm.
//
// WARNING: This function is part of a WIP package intended only for Podman,
// Buildah, and Skopeo. Do not use in external applications.
//
// It returns (64, true) for SHA256 and (128, true) for SHA512; any other
// algorithm yields (0, false). Extend this function as further algorithms
// are added. This is a pure function with no side effects and is thread-safe
// for concurrent calls from multiple goroutines. It is typically used for
// validation and for detecting an algorithm from a hex string's length.
func GetDigestAlgorithmExpectedLength(algorithm digest.Algorithm) (int, bool) {
	if algorithm == digest.SHA256 {
		return 64, true
	}
	if algorithm == digest.SHA512 {
		return 128, true
	}
	return 0, false
}
// DetectDigestAlgorithmFromLength attempts to identify a digest algorithm
// from the length of a hex string.
//
// WARNING: This function is part of a WIP package intended only for Podman,
// Buildah, and Skopeo. Do not use in external applications.
//
// Each supported algorithm is checked against its expected hex length; the
// first match is returned as (algorithm, true). If no supported algorithm
// produces digests of the given length, ("", false) is returned.
//
// This is a pure function with no side effects and is thread-safe for
// concurrent calls from multiple goroutines. It is typically used for
// reverse lookup when only the hex string length is known; should two
// algorithms ever share a length, the first one wins.
func DetectDigestAlgorithmFromLength(length int) (digest.Algorithm, bool) {
	for _, candidate := range GetSupportedDigestAlgorithms() {
		expected, known := GetDigestAlgorithmExpectedLength(candidate)
		if known && expected == length {
			return candidate, true
		}
	}
	return "", false
}

View File

@ -29,5 +29,5 @@ func ProcessExitCode(err error) (exitCode int) {
exitCode = 127
}
}
return
return exitCode
}

View File

@ -4,10 +4,12 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
return &StatT{
size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtimespec}, nil
mtim: s.Mtimespec,
}, nil
}

View File

@ -17,12 +17,10 @@ const (
EOVERFLOW unix.Errno = unix.EOVERFLOW
)
var (
namespaceMap = map[string]int{
"user": EXTATTR_NAMESPACE_USER,
"system": EXTATTR_NAMESPACE_SYSTEM,
}
)
var namespaceMap = map[string]int{
"user": EXTATTR_NAMESPACE_USER,
"system": EXTATTR_NAMESPACE_SYSTEM,
}
func xattrToExtattr(xattr string) (namespace int, extattr string, err error) {
namespaceName, extattr, found := strings.Cut(xattr, ".")

View File

@ -43,8 +43,8 @@ type TruncIndex struct {
// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
// Invalid IDs are _silently_ ignored.
func NewTruncIndex(ids []string) (idx *TruncIndex) {
idx = &TruncIndex{
func NewTruncIndex(ids []string) *TruncIndex {
idx := &TruncIndex{
ids: make(map[string]struct{}),
// Change patricia max prefix per node length,
@ -54,7 +54,7 @@ func NewTruncIndex(ids []string) (idx *TruncIndex) {
for _, id := range ids {
_ = idx.addID(id) // Ignore invalid IDs. Duplicate IDs are not a problem.
}
return
return idx
}
func (idx *TruncIndex) addID(id string) error {

15
vendor/modules.txt vendored
View File

@ -136,7 +136,7 @@ github.com/docker/docker/client
github.com/docker/docker/pkg/homedir
github.com/docker/docker/pkg/jsonmessage
github.com/docker/docker/pkg/stdcopy
# github.com/docker/docker-credential-helpers v0.9.3
# github.com/docker/docker-credential-helpers v0.9.4
## explicit; go 1.21
github.com/docker/docker-credential-helpers/client
github.com/docker/docker-credential-helpers/credentials
@ -247,7 +247,7 @@ github.com/mattn/go-sqlite3
# github.com/miekg/pkcs11 v1.1.1
## explicit; go 1.12
github.com/miekg/pkcs11
# github.com/mistifyio/go-zfs/v3 v3.0.1
# github.com/mistifyio/go-zfs/v3 v3.1.0
## explicit; go 1.14
github.com/mistifyio/go-zfs/v3
# github.com/moby/buildkit v0.25.0
@ -464,7 +464,7 @@ go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/internal/telemetry
go.opentelemetry.io/otel/trace/noop
# go.podman.io/common v0.65.1-0.20250916163606-92222dcd3da4
# go.podman.io/common v0.65.1-0.20250916163606-92222dcd3da4 => github.com/lsm5/container-libs/common v0.0.0-20251007215947-621025c4c891
## explicit; go 1.24.2
go.podman.io/common/internal
go.podman.io/common/internal/attributedstring
@ -518,7 +518,7 @@ go.podman.io/common/pkg/umask
go.podman.io/common/pkg/util
go.podman.io/common/pkg/version
go.podman.io/common/version
# go.podman.io/image/v5 v5.37.1-0.20250916163606-92222dcd3da4
# go.podman.io/image/v5 v5.37.1-0.20250916163606-92222dcd3da4 => github.com/lsm5/container-libs/image/v5 v5.0.0-20251007215947-621025c4c891
## explicit; go 1.24.0
go.podman.io/image/v5/copy
go.podman.io/image/v5/directory
@ -567,6 +567,7 @@ go.podman.io/image/v5/pkg/blobinfocache/sqlite
go.podman.io/image/v5/pkg/compression
go.podman.io/image/v5/pkg/compression/internal
go.podman.io/image/v5/pkg/compression/types
go.podman.io/image/v5/pkg/digestvalidation
go.podman.io/image/v5/pkg/docker/config
go.podman.io/image/v5/pkg/shortnames
go.podman.io/image/v5/pkg/strslice
@ -586,7 +587,7 @@ go.podman.io/image/v5/transports
go.podman.io/image/v5/transports/alltransports
go.podman.io/image/v5/types
go.podman.io/image/v5/version
# go.podman.io/storage v1.60.1-0.20250916163606-92222dcd3da4
# go.podman.io/storage v1.60.1-0.20250916163606-92222dcd3da4 => github.com/lsm5/container-libs/storage v0.0.0-20251007215947-621025c4c891
## explicit; go 1.24.0
go.podman.io/storage
go.podman.io/storage/drivers
@ -630,6 +631,7 @@ go.podman.io/storage/pkg/reexec
go.podman.io/storage/pkg/regexp
go.podman.io/storage/pkg/stringid
go.podman.io/storage/pkg/stringutils
go.podman.io/storage/pkg/supported-digests
go.podman.io/storage/pkg/system
go.podman.io/storage/pkg/tarlog
go.podman.io/storage/pkg/truncindex
@ -826,3 +828,6 @@ tags.cncf.io/container-device-interface/pkg/parser
# tags.cncf.io/container-device-interface/specs-go v1.0.0
## explicit; go 1.19
tags.cncf.io/container-device-interface/specs-go
# go.podman.io/common => github.com/lsm5/container-libs/common v0.0.0-20251007215947-621025c4c891
# go.podman.io/storage => github.com/lsm5/container-libs/storage v0.0.0-20251007215947-621025c4c891
# go.podman.io/image/v5 => github.com/lsm5/container-libs/image/v5 v5.0.0-20251007215947-621025c4c891