package buildah

import (
	"archive/tar"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"maps"
	"os"
	"path"
	"path/filepath"
	"slices"
	"strings"
	"time"

	"github.com/containers/buildah/copier"
	"github.com/containers/buildah/define"
	"github.com/containers/buildah/docker"
	"github.com/containers/buildah/internal/config"
	"github.com/containers/buildah/internal/mkcw"
	"github.com/containers/buildah/internal/tmpdir"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/manifest"
	is "github.com/containers/image/v5/storage"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/ioutils"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
)

const (
	// OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
	// suitable for specifying as a value of the PreferredManifestType
	// member of a CommitOptions structure. It is also the default.
	OCIv1ImageManifest = define.OCIv1ImageManifest
	// Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
	// manifest, suitable for specifying as a value of the
	// PreferredManifestType member of a CommitOptions structure.
	Dockerv2ImageManifest = define.Dockerv2ImageManifest
	// containerExcludesDir is the subdirectory of the container data
	// directory where we drop exclusions.
	containerExcludesDir = "commit-excludes"
	// containerPulledUpDir is the subdirectory of the container data
	// directory where we drop pulled-up path information when we're not
	// squashing.
	containerPulledUpDir = "commit-pulled-up"
	// containerExcludesSubstring is the suffix of files under
	// $cdir/containerExcludesDir and $cdir/containerPulledUpDir which
	// should be ignored, as they only exist because we use CreateTemp() to
	// create uniquely-named files, but we don't want to try to use their
	// contents until after they've been written to.
	containerExcludesSubstring = ".tmp"
)
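
// The ".tmp" convention above implies a two-step write. One plausible shape
// of that pattern (a hypothetical sketch, not the actual writer, which lives
// elsewhere; "cdir" is assumed to be the container data directory):
//
//	f, err := os.CreateTemp(filepath.Join(cdir, containerExcludesDir), "*.tmp")
//	// ... write the exclusion data to f, close it ...
//	// rename to drop the ".tmp" suffix so readers know the contents
//	// are complete
//	err = os.Rename(f.Name(), strings.TrimSuffix(f.Name(), ".tmp"))
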
// ExtractRootfsOptions is consumed by ExtractRootfs(), which allows users to
// control whether various information like the setuid and setgid bits and
// xattrs are preserved when extracting file system objects.
type ExtractRootfsOptions struct {
	StripSetuidBit bool       // strip the setuid bit off of items being extracted.
	StripSetgidBit bool       // strip the setgid bit off of items being extracted.
	StripXattrs    bool       // don't record extended attributes of items being extracted.
	ForceTimestamp *time.Time // force timestamps in output content
}
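
// A minimal usage sketch (the values are illustrative): pinning a timestamp
// and stripping xattrs tends to make the extracted archive reproducible.
//
//	epoch := time.Unix(0, 0).UTC()
//	opts := ExtractRootfsOptions{
//		StripXattrs:    true,
//		ForceTimestamp: &epoch,
//	}
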
type containerImageRef struct {
	fromImageName         string
	fromImageID           string
	store                 storage.Store
	compression           archive.Compression
	name                  reference.Named
	names                 []string
	containerID           string
	mountLabel            string
	layerID               string
	oconfig               []byte
	dconfig               []byte
	created               *time.Time
	createdBy             string
	layerModTime          *time.Time
	layerLatestModTime    *time.Time
	historyComment        string
	annotations           map[string]string
	preferredManifestType string
	squash                bool
	confidentialWorkload  ConfidentialWorkloadOptions
	omitHistory           bool
	emptyLayer            bool
	omitLayerHistoryEntry bool
	idMappingOptions      *define.IDMappingOptions
	parent                string
	blobDirectory         string
	preEmptyLayers        []v1.History
	preLayers             []commitLinkedLayerInfo
	postEmptyLayers       []v1.History
	postLayers            []commitLinkedLayerInfo
	overrideChanges       []string
	overrideConfig        *manifest.Schema2Config
	extraImageContent     map[string]string
	compatSetParent       types.OptionalBool
	layerExclusions       []copier.ConditionalRemovePath
	layerMountTargets     []copier.ConditionalRemovePath
	layerPullUps          []copier.EnsureParentPath
	unsetAnnotations      []string
	setAnnotations        []string
	createdAnnotation     types.OptionalBool
}

type blobLayerInfo struct {
	ID   string
	Size int64
}

type commitLinkedLayerInfo struct {
	layerID            string // more like layer "ID"
	linkedLayer        LinkedLayer
	uncompressedDigest digest.Digest
	size               int64
}

type containerImageSource struct {
	path          string
	ref           *containerImageRef
	store         storage.Store
	containerID   string
	mountLabel    string
	layerID       string
	names         []string
	compression   archive.Compression
	config        []byte
	configDigest  digest.Digest
	manifest      []byte
	manifestType  string
	blobDirectory string
	blobLayers    map[digest.Digest]blobLayerInfo
}

func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) {
	src, err := i.NewImageSource(ctx, sc)
	if err != nil {
		return nil, err
	}
	return image.FromSource(ctx, sc, src)
}

// expectedOCIDiffIDs returns the number of non-empty history entries in an
// OCI image configuration, which is the number of diffIDs it should list.
func expectedOCIDiffIDs(image v1.Image) int {
	expected := 0
	for _, history := range image.History {
		if !history.EmptyLayer {
			expected++
		}
	}
	return expected
}

// expectedDockerDiffIDs returns the number of non-empty history entries in a
// Docker image configuration, which is the number of diffIDs it should list.
func expectedDockerDiffIDs(image docker.V2Image) int {
	expected := 0
	for _, history := range image.History {
		if !history.EmptyLayer {
			expected++
		}
	}
	return expected
}
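
// Both helpers back the same confidence check performed by buildHistory()
// below: a configuration is internally consistent only if its diffID count
// matches its non-empty history count. A sketch of the check, using a
// hypothetical v1.Image value "img":
//
//	if len(img.RootFS.DiffIDs) != expectedOCIDiffIDs(img) {
//		// history and on-disk layers disagree; refuse to commit
//	}
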
// Extract the container's whole filesystem as a filesystem image, wrapped
// in LUKS-compatible encryption.
func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) {
	var image v1.Image
	if err := json.Unmarshal(i.oconfig, &image); err != nil {
		return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err)
	}
	if options.TempDir == "" {
		cdir, err := i.store.ContainerDirectory(i.containerID)
		if err != nil {
			return nil, fmt.Errorf("getting the per-container data directory for %q: %w", i.containerID, err)
		}
		tempdir, err := os.MkdirTemp(cdir, "buildah-rootfs")
		if err != nil {
			return nil, fmt.Errorf("creating a temporary data directory to hold a rootfs image for %q: %w", i.containerID, err)
		}
		defer func() {
			if err := os.RemoveAll(tempdir); err != nil {
				logrus.Warnf("removing temporary directory %q: %v", tempdir, err)
			}
		}()
		options.TempDir = tempdir
	}
	mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
	if err != nil {
		return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
	}
	archiveOptions := mkcw.ArchiveOptions{
		AttestationURL:           options.AttestationURL,
		CPUs:                     options.CPUs,
		Memory:                   options.Memory,
		TempDir:                  options.TempDir,
		TeeType:                  options.TeeType,
		IgnoreAttestationErrors:  options.IgnoreAttestationErrors,
		WorkloadID:               options.WorkloadID,
		DiskEncryptionPassphrase: options.DiskEncryptionPassphrase,
		Slop:                     options.Slop,
		FirmwareLibrary:          options.FirmwareLibrary,
		GraphOptions:             i.store.GraphOptions(),
		ExtraImageContent:        i.extraImageContent,
	}
	rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions)
	if err != nil {
		if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil {
			logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
		}
		return nil, fmt.Errorf("converting rootfs %q: %w", i.containerID, err)
	}
	return ioutils.NewReadCloserWrapper(rc, func() error {
		if err = rc.Close(); err != nil {
			err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
		}
		if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
			if err2 != nil {
				err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
			}
			err = err2
		} else {
			logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
		}
		return err
	}), nil
}

// Extract the container's whole filesystem as if it were a single layer.
// The ExtractRootfsOptions control whether or not to preserve setuid and
// setgid bits and extended attributes on contents.
func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
	var uidMap, gidMap []idtools.IDMap
	mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
	if err != nil {
		return nil, nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
	}
	pipeReader, pipeWriter := io.Pipe()
	errChan := make(chan error, 1)
	go func() {
		defer pipeWriter.Close()
		defer close(errChan)
		if len(i.extraImageContent) > 0 {
			// Abuse the tar format and _prepend_ the synthesized
			// data items to the archive we'll get from
			// copier.Get(), in a way that looks right to a reader
			// as long as we DON'T Close() the tar Writer.
			filename, _, _, err := i.makeExtraImageContentDiff(false, opts.ForceTimestamp)
			if err != nil {
				errChan <- fmt.Errorf("creating part of archive with extra content: %w", err)
				return
			}
			file, err := os.Open(filename)
			if err != nil {
				errChan <- err
				return
			}
			defer file.Close()
			if _, err = io.Copy(pipeWriter, file); err != nil {
				errChan <- fmt.Errorf("writing contents of %q: %w", filename, err)
				return
			}
		}
		if i.idMappingOptions != nil {
			uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
		}
		copierOptions := copier.GetOptions{
			UIDMap:         uidMap,
			GIDMap:         gidMap,
			StripSetuidBit: opts.StripSetuidBit,
			StripSetgidBit: opts.StripSetgidBit,
			StripXattrs:    opts.StripXattrs,
			Timestamp:      opts.ForceTimestamp,
		}
		err := copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
		errChan <- err
	}()
	return ioutils.NewReadCloserWrapper(pipeReader, func() error {
		if err = pipeReader.Close(); err != nil {
			err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
		}
		if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
			if err2 != nil {
				err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
			}
			err = err2
		}
		return err
	}), errChan, nil
}
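
// drainRootfsSketch is an illustrative, unused sketch of the consumption
// pattern extractRootfs expects (mirroring what NewImageSource does below):
// drain and close the ReadCloser first, and only then receive from errChan,
// which carries any error raised by the producing goroutine.
func drainRootfsSketch(i *containerImageRef) error {
	rc, errChan, err := i.extractRootfs(ExtractRootfsOptions{})
	if err != nil {
		return err
	}
	// Reading to EOF lets the goroutine finish and send its result.
	if _, err := io.Copy(io.Discard, rc); err != nil {
		rc.Close()
		return err
	}
	// Closing the wrapper also unmounts the container.
	if err := rc.Close(); err != nil {
		return err
	}
	return <-errChan
}
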
// manifestBuilder assembles the manifest and configuration for an image in
// one of the formats we know how to produce.
type manifestBuilder interface {
	// addLayer adds notes to the manifest and config about the layer. The layer blobs are
	// identified by their possibly-compressed blob digests and sizes in the manifest, and by
	// their uncompressed digests (diffIDs) in the config.
	addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest)
	// computeLayerMIMEType records the media type to use for subsequently-added
	// layers, given the compression that we'll be applying to them.
	computeLayerMIMEType(what string, layerCompression archive.Compression) error
	// buildHistory adds history entries for prepended, base, current, and appended layers.
	buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error
	// manifestAndConfig returns the encoded manifest and configuration blobs.
	manifestAndConfig() ([]byte, []byte, error)
}
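
// buildManifestSketch is an illustrative, unused sketch of the order in which
// NewImageSource drives a manifestBuilder: pick the media type for a layer,
// register the layer's digests, build history, then marshal the results. The
// digest and size arguments here are placeholders.
func buildManifestSketch(mb manifestBuilder, blobSum, diffID digest.Digest, blobSize int64) ([]byte, []byte, error) {
	if err := mb.computeLayerMIMEType("layer", archive.Uncompressed); err != nil {
		return nil, nil, err
	}
	mb.addLayer(blobSum, blobSize, diffID)
	if err := mb.buildHistory("", ""); err != nil {
		return nil, nil, err
	}
	return mb.manifestAndConfig()
}
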
type dockerSchema2ManifestBuilder struct {
	i              *containerImageRef
	layerMediaType string
	dimage         docker.V2Image
	dmanifest      docker.V2S2Manifest
}

// Build fresh copies of the container configuration structures so that we can edit them
// without making unintended changes to the original Builder (Docker schema 2).
func (i *containerImageRef) newDockerSchema2ManifestBuilder() (manifestBuilder, error) {
	created := time.Now().UTC()
	if i.created != nil {
		created = *i.created
	}

	// Build an empty image, and then decode over it.
	dimage := docker.V2Image{}
	if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
		return nil, err
	}
	// Suppress the hostname and domainname if we're running with the
	// equivalent of either --timestamp or --source-date-epoch.
	if i.created != nil {
		dimage.Config.Hostname = "sandbox"
		dimage.Config.Domainname = ""
	}
	// Set the parent, but only if we want to be compatible with "classic" docker build.
	if i.compatSetParent == types.OptionalBoolTrue {
		dimage.Parent = docker.ID(i.parent)
	}
	// Set the container ID and containerConfig in the docker format.
	dimage.Container = i.containerID
	if i.created != nil {
		dimage.Container = ""
	}
	if dimage.Config != nil {
		dimage.ContainerConfig = *dimage.Config
	}
	// Always replace this value, since we're newer than our base image.
	dimage.Created = created
	// Clear the list of diffIDs, since we always repopulate it.
	dimage.RootFS = &docker.V2S2RootFS{}
	dimage.RootFS.Type = docker.TypeLayers
	dimage.RootFS.DiffIDs = []digest.Digest{}
	// Only clear the history if we're squashing, otherwise leave it be so
	// that we can append entries to it. Clear the parent, too, to reflect
	// that we no longer include its layers and history.
	if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
		dimage.Parent = ""
		dimage.History = []docker.V2S2History{}
	}

	// If we were supplied with a configuration, copy fields from it to
	// matching fields in both formats.
	if err := config.OverrideDocker(dimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
		return nil, fmt.Errorf("applying changes: %w", err)
	}

	// If we're producing a confidential workload, override the command and
	// assorted other settings that aren't expected to work correctly.
	if i.confidentialWorkload.Convert {
		dimage.Config.Entrypoint = []string{"/entrypoint"}
		dimage.Config.Cmd = nil
		dimage.Config.User = ""
		dimage.Config.WorkingDir = ""
		dimage.Config.Healthcheck = nil
		dimage.Config.Shell = nil
		dimage.Config.Volumes = nil
		dimage.Config.ExposedPorts = nil
	}

	// Return partial manifest. The Layers lists will be populated later.
	return &dockerSchema2ManifestBuilder{
		i:              i,
		layerMediaType: docker.V2S2MediaTypeUncompressedLayer,
		dimage:         dimage,
		dmanifest: docker.V2S2Manifest{
			V2Versioned: docker.V2Versioned{
				SchemaVersion: 2,
				MediaType:     manifest.DockerV2Schema2MediaType,
			},
			Config: docker.V2S2Descriptor{
				MediaType: manifest.DockerV2Schema2ConfigMediaType,
			},
			Layers: []docker.V2S2Descriptor{},
		},
	}, nil
}

func (mb *dockerSchema2ManifestBuilder) addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest) {
	dlayerDescriptor := docker.V2S2Descriptor{
		MediaType: mb.layerMediaType,
		Digest:    layerBlobSum,
		Size:      layerBlobSize,
	}
	mb.dmanifest.Layers = append(mb.dmanifest.Layers, dlayerDescriptor)
	// Note this layer in the list of diffIDs, again using the uncompressed digest.
	mb.dimage.RootFS.DiffIDs = append(mb.dimage.RootFS.DiffIDs, diffID)
}

// Compute the media types which we need to attach to a layer, given the type of
// compression that we'll be applying.
func (mb *dockerSchema2ManifestBuilder) computeLayerMIMEType(what string, layerCompression archive.Compression) error {
	dmediaType := docker.V2S2MediaTypeUncompressedLayer
	if layerCompression != archive.Uncompressed {
		switch layerCompression {
		case archive.Gzip:
			dmediaType = manifest.DockerV2Schema2LayerMediaType
			logrus.Debugf("compressing %s with gzip", what)
		case archive.Bzip2:
			// Until the image specs define a media type for bzip2-compressed layers, even if we know
			// how to decompress them, we can't try to compress layers with bzip2.
			return errors.New("media type for bzip2-compressed layers is not defined")
		case archive.Xz:
			// Until the image specs define a media type for xz-compressed layers, even if we know
			// how to decompress them, we can't try to compress layers with xz.
			return errors.New("media type for xz-compressed layers is not defined")
		case archive.Zstd:
			// Until the image specs define a media type for zstd-compressed layers, even if we know
			// how to decompress them, we can't try to compress layers with zstd.
			return errors.New("media type for zstd-compressed layers is not defined")
		default:
			logrus.Debugf("compressing %s with unknown compressor(?)", what)
		}
	}
	mb.layerMediaType = dmediaType
	return nil
}

func (mb *dockerSchema2ManifestBuilder) buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error {
	// Build history notes in the image configuration.
	appendHistory := func(history []v1.History, empty bool) {
		for i := range history {
			var created time.Time
			if history[i].Created != nil {
				created = *history[i].Created
			}
			dnews := docker.V2S2History{
				Created:    created,
				CreatedBy:  history[i].CreatedBy,
				Author:     history[i].Author,
				Comment:    history[i].Comment,
				EmptyLayer: empty,
			}
			mb.dimage.History = append(mb.dimage.History, dnews)
		}
	}

	// Keep track of how many entries the base image's history had
	// before we started adding to it.
	baseImageHistoryLen := len(mb.dimage.History)

	// Add history entries for prepended empty layers.
	appendHistory(mb.i.preEmptyLayers, true)
	// Add history entries for prepended API-supplied layers.
	for _, h := range mb.i.preLayers {
		appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
	}
	// Add a history entry for this layer, empty or not.
	created := time.Now().UTC()
	if mb.i.created != nil {
		created = (*mb.i.created).UTC()
	}
	if !mb.i.omitLayerHistoryEntry {
		dnews := docker.V2S2History{
			Created:    created,
			CreatedBy:  mb.i.createdBy,
			Author:     mb.dimage.Author,
			EmptyLayer: mb.i.emptyLayer,
			Comment:    mb.i.historyComment,
		}
		mb.dimage.History = append(mb.dimage.History, dnews)
	}
	// Add a history entry for the extra image content if we added a layer for it.
	// This diff was added to the list of layers before API-supplied layers that
	// needed to be appended, and we need to keep the order of history entries for
	// not-empty layers consistent with that.
	if extraImageContentDiff != "" {
		createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
		dnews := docker.V2S2History{
			Created:   created,
			CreatedBy: createdBy,
		}
		mb.dimage.History = append(mb.dimage.History, dnews)
	}
	// Add history entries for appended empty layers.
	appendHistory(mb.i.postEmptyLayers, true)
	// Add history entries for appended API-supplied layers.
	for _, h := range mb.i.postLayers {
		appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
	}

	// Assemble a comment indicating which base image was used, if it
	// wasn't just an image ID, and add it to the first history entry we
	// added, if we indeed added one.
	if len(mb.dimage.History) > baseImageHistoryLen {
		var fromComment string
		if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) {
			if mb.dimage.History[baseImageHistoryLen].Comment != "" {
				fromComment = " "
			}
			fromComment += "FROM " + mb.i.fromImageName
		}
		mb.dimage.History[baseImageHistoryLen].Comment += fromComment
	}

	// Confidence check that we didn't just create a mismatch between non-empty layers in the
	// history and the number of diffIDs. Only applicable if the base image (if there was
	// one) provided us at least one entry to use as a starting point.
	if baseImageHistoryLen != 0 {
		expectedDiffIDs := expectedDockerDiffIDs(mb.dimage)
		if len(mb.dimage.RootFS.DiffIDs) != expectedDiffIDs {
			return fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(mb.dimage.RootFS.DiffIDs))
		}
	}
	return nil
}

func (mb *dockerSchema2ManifestBuilder) manifestAndConfig() ([]byte, []byte, error) {
	// Encode the image configuration blob.
	dconfig, err := json.Marshal(&mb.dimage)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.dimage, err)
	}
	logrus.Debugf("Docker v2s2 config = %s", dconfig)

	// Add the configuration blob to the manifest.
	mb.dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
	mb.dmanifest.Config.Size = int64(len(dconfig))
	mb.dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType

	// Encode the manifest.
	dmanifestbytes, err := json.Marshal(&mb.dmanifest)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.dmanifest, err)
	}
	logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)

	return dmanifestbytes, dconfig, nil
}

type ociManifestBuilder struct {
	i              *containerImageRef
	layerMediaType string
	oimage         v1.Image
	omanifest      v1.Manifest
}

// Build fresh copies of the container configuration structures so that we can edit them
// without making unintended changes to the original Builder (OCI manifest).
func (i *containerImageRef) newOCIManifestBuilder() (manifestBuilder, error) {
	created := time.Now().UTC()
	if i.created != nil {
		created = *i.created
	}

	// Build an empty image, and then decode over it.
	oimage := v1.Image{}
	if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
		return nil, err
	}
	// Always replace this value, since we're newer than our base image.
	oimage.Created = &created
	// Clear the list of diffIDs, since we always repopulate it.
	oimage.RootFS.Type = docker.TypeLayers
	oimage.RootFS.DiffIDs = []digest.Digest{}
	// Only clear the history if we're squashing, otherwise leave it be so that we can append
	// entries to it.
	if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
		oimage.History = []v1.History{}
	}

	// If we were supplied with a configuration, copy fields from it to
	// matching fields in both formats.
	if err := config.OverrideOCI(&oimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
		return nil, fmt.Errorf("applying changes: %w", err)
	}

	// If we're producing a confidential workload, override the command and
	// assorted other settings that aren't expected to work correctly.
	if i.confidentialWorkload.Convert {
		oimage.Config.Entrypoint = []string{"/entrypoint"}
		oimage.Config.Cmd = nil
		oimage.Config.User = ""
		oimage.Config.WorkingDir = ""
		oimage.Config.Volumes = nil
		oimage.Config.ExposedPorts = nil
	}

	// Return partial manifest. The Layers lists will be populated later.
	annotations := make(map[string]string)
	maps.Copy(annotations, i.annotations)
	switch i.createdAnnotation {
	case types.OptionalBoolFalse:
		delete(annotations, v1.AnnotationCreated)
	default:
		fallthrough
	case types.OptionalBoolTrue, types.OptionalBoolUndefined:
		annotations[v1.AnnotationCreated] = created.UTC().Format(time.RFC3339Nano)
	}
	for _, k := range i.unsetAnnotations {
		delete(annotations, k)
	}
	for _, kv := range i.setAnnotations {
		k, v, _ := strings.Cut(kv, "=")
		annotations[k] = v
	}
	return &ociManifestBuilder{
		i: i,
		// The default layer media type assumes no compression.
		layerMediaType: v1.MediaTypeImageLayer,
		oimage:         oimage,
		omanifest: v1.Manifest{
			Versioned: specs.Versioned{
				SchemaVersion: 2,
			},
			MediaType: v1.MediaTypeImageManifest,
			Config: v1.Descriptor{
				MediaType: v1.MediaTypeImageConfig,
			},
			Layers:      []v1.Descriptor{},
			Annotations: annotations,
		},
	}, nil
}

func (mb *ociManifestBuilder) addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest) {
	olayerDescriptor := v1.Descriptor{
		MediaType: mb.layerMediaType,
		Digest:    layerBlobSum,
		Size:      layerBlobSize,
	}
	mb.omanifest.Layers = append(mb.omanifest.Layers, olayerDescriptor)
	// Note this layer in the list of diffIDs, again using the uncompressed digest.
	mb.oimage.RootFS.DiffIDs = append(mb.oimage.RootFS.DiffIDs, diffID)
}

// Compute the media types which we need to attach to a layer, given the type of
// compression that we'll be applying.
func (mb *ociManifestBuilder) computeLayerMIMEType(what string, layerCompression archive.Compression) error {
	omediaType := v1.MediaTypeImageLayer
	if layerCompression != archive.Uncompressed {
		switch layerCompression {
		case archive.Gzip:
			omediaType = v1.MediaTypeImageLayerGzip
			logrus.Debugf("compressing %s with gzip", what)
		case archive.Bzip2:
			// Until the image specs define a media type for bzip2-compressed layers, even if we know
			// how to decompress them, we can't try to compress layers with bzip2.
			return errors.New("media type for bzip2-compressed layers is not defined")
		case archive.Xz:
			// Until the image specs define a media type for xz-compressed layers, even if we know
			// how to decompress them, we can't try to compress layers with xz.
			return errors.New("media type for xz-compressed layers is not defined")
		case archive.Zstd:
			omediaType = v1.MediaTypeImageLayerZstd
			logrus.Debugf("compressing %s with zstd", what)
		default:
			logrus.Debugf("compressing %s with unknown compressor(?)", what)
		}
	}
	mb.layerMediaType = omediaType
	return nil
}

func (mb *ociManifestBuilder) buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error {
	// Build history notes in the image configuration.
	appendHistory := func(history []v1.History, empty bool) {
		for i := range history {
			var created *time.Time
			if history[i].Created != nil {
				copiedTimestamp := *history[i].Created
				created = &copiedTimestamp
			}
			onews := v1.History{
				Created:    created,
				CreatedBy:  history[i].CreatedBy,
				Author:     history[i].Author,
				Comment:    history[i].Comment,
				EmptyLayer: empty,
			}
			mb.oimage.History = append(mb.oimage.History, onews)
		}
	}

	// Keep track of how many entries the base image's history had
	// before we started adding to it.
	baseImageHistoryLen := len(mb.oimage.History)

	// Add history entries for prepended empty layers.
	appendHistory(mb.i.preEmptyLayers, true)
	// Add history entries for prepended API-supplied layers.
	for _, h := range mb.i.preLayers {
		appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
	}
	// Add a history entry for this layer, empty or not.
	created := time.Now().UTC()
	if mb.i.created != nil {
		created = (*mb.i.created).UTC()
	}
	if !mb.i.omitLayerHistoryEntry {
		onews := v1.History{
			Created:    &created,
			CreatedBy:  mb.i.createdBy,
			Author:     mb.oimage.Author,
			EmptyLayer: mb.i.emptyLayer,
			Comment:    mb.i.historyComment,
		}
		mb.oimage.History = append(mb.oimage.History, onews)
	}
	// Add a history entry for the extra image content if we added a layer for it.
	// This diff was added to the list of layers before API-supplied layers that
	// needed to be appended, and we need to keep the order of history entries for
	// not-empty layers consistent with that.
	if extraImageContentDiff != "" {
		createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
		onews := v1.History{
			Created:   &created,
			CreatedBy: createdBy,
		}
		mb.oimage.History = append(mb.oimage.History, onews)
	}
	// Add history entries for appended empty layers.
	appendHistory(mb.i.postEmptyLayers, true)
	// Add history entries for appended API-supplied layers.
	for _, h := range mb.i.postLayers {
		appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
	}

	// Assemble a comment indicating which base image was used, if it
	// wasn't just an image ID, and add it to the first history entry we
	// added, if we indeed added one.
	if len(mb.oimage.History) > baseImageHistoryLen {
		var fromComment string
		if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) {
			if mb.oimage.History[baseImageHistoryLen].Comment != "" {
				fromComment = " "
			}
			fromComment += "FROM " + mb.i.fromImageName
		}
		mb.oimage.History[baseImageHistoryLen].Comment += fromComment
	}

	// Confidence check that we didn't just create a mismatch between non-empty layers in the
	// history and the number of diffIDs. Only applicable if the base image (if there was
	// one) provided us at least one entry to use as a starting point.
	if baseImageHistoryLen != 0 {
		expectedDiffIDs := expectedOCIDiffIDs(mb.oimage)
		if len(mb.oimage.RootFS.DiffIDs) != expectedDiffIDs {
			return fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(mb.oimage.RootFS.DiffIDs))
		}
	}
	return nil
}

func (mb *ociManifestBuilder) manifestAndConfig() ([]byte, []byte, error) {
	// Encode the image configuration blob.
	oconfig, err := json.Marshal(&mb.oimage)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.oimage, err)
	}
	logrus.Debugf("OCIv1 config = %s", oconfig)

	// Add the configuration blob to the manifest.
	mb.omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
	mb.omanifest.Config.Size = int64(len(oconfig))
	mb.omanifest.Config.MediaType = v1.MediaTypeImageConfig

	// Encode the manifest.
	omanifestbytes, err := json.Marshal(&mb.omanifest)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.omanifest, err)
	}
	logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)

	return omanifestbytes, oconfig, nil
}

// filterExclusionsByImage returns a slice of the members of "exclusions" which are present in the image with the specified ID.
func (i containerImageRef) filterExclusionsByImage(exclusions []copier.EnsureParentPath, imageID string) ([]copier.EnsureParentPath, error) {
	if len(exclusions) == 0 || imageID == "" {
		return nil, nil
	}
	var paths []copier.EnsureParentPath
	mountPoint, err := i.store.MountImage(imageID, nil, i.mountLabel)
	if err != nil {
		return nil, err
	}
	defer func() {
		if _, err := i.store.UnmountImage(imageID, false); err != nil {
			logrus.Debugf("unmounting image %q: %v", imageID, err)
		}
	}()
	globs := make([]string, 0, len(exclusions))
	for _, exclusion := range exclusions {
		globs = append(globs, exclusion.Path)
	}
	options := copier.StatOptions{}
	stats, err := copier.Stat(mountPoint, mountPoint, options, globs)
	if err != nil {
		return nil, fmt.Errorf("checking for potential exclusion items in image %q: %w", imageID, err)
	}
	for _, stat := range stats {
		for _, exclusion := range exclusions {
			if stat.Glob != exclusion.Path {
				continue
			}
			for result, stat := range stat.Results {
				if result != exclusion.Path {
					continue
				}
				if exclusion.ModTime != nil && !exclusion.ModTime.Equal(stat.ModTime) {
					continue
				}
				if exclusion.Mode != nil && *exclusion.Mode != stat.Mode {
					continue
				}
				if exclusion.Owner != nil && (int64(exclusion.Owner.UID) != stat.UID && int64(exclusion.Owner.GID) != stat.GID) {
					continue
				}
				paths = append(paths, exclusion)
			}
		}
	}
	return paths, nil
}
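
// A sketch of the matching rule above: an exclusion survives filtering only
// if some stat result has the same path and every populated optional field
// agrees with what's on disk, e.g. (hypothetical values):
//
//	exclusion := copier.EnsureParentPath{
//		Path:    "/var/run",
//		ModTime: &someTime, // nil fields are not compared
//	}
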
func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemContext) (src types.ImageSource, err error) {
	// These maps will let us check if a layer ID is part of one group or another.
	parentLayerIDs := make(map[string]bool)
	apiLayerIDs := make(map[string]bool)
	// Start building the list of layers with any prepended layers.
	layers := []string{}
	for _, preLayer := range i.preLayers {
		layers = append(layers, preLayer.layerID)
		apiLayerIDs[preLayer.layerID] = true
	}
	// Now look at the read-write layer, and prepare to work our way back
	// through all of its parent layers.
	layerID := i.layerID
	layer, err := i.store.Layer(layerID)
	if err != nil {
		return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
	}
	// Walk the list of parent layers, prepending each as we go. If we're squashing
	// or making a confidential workload, we're only producing one layer, so stop at
	// the layer ID of the top layer, which we won't really be using anyway.
	for layer != nil {
		if layerID == i.layerID {
			// append the layer for this container to the list,
			// whether it's first or after some prepended layers
			layers = append(layers, layerID)
		} else {
			// prepend this parent layer to the list
			layers = append(append([]string{}, layerID), layers...)
			parentLayerIDs[layerID] = true
		}
		layerID = layer.Parent
		if layerID == "" || i.confidentialWorkload.Convert || i.squash {
			err = nil
			break
		}
		layer, err = i.store.Layer(layerID)
		if err != nil {
			return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
		}
	}
	layer = nil

	// If we're slipping in a synthesized layer to hold some files, we need
	// to add a placeholder for it to the list just after the read-write
	// layer. Confidential workloads and squashed images will just inline
	// the files, so we don't need to create a layer in those cases.
	const synthesizedLayerID = "(synthesized layer)"
	if len(i.extraImageContent) > 0 && !i.confidentialWorkload.Convert && !i.squash {
		layers = append(layers, synthesizedLayerID)
	}
	// Now add any API-supplied layers we have to append.
	for _, postLayer := range i.postLayers {
		layers = append(layers, postLayer.layerID)
		apiLayerIDs[postLayer.layerID] = true
	}
	logrus.Debugf("layer list: %q", layers)

	// It's simpler from here on to keep track of these as a group.
	apiLayers := append(slices.Clone(i.preLayers), slices.Clone(i.postLayers)...)

	// Make a temporary directory to hold blobs.
	path, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
	if err != nil {
		return nil, fmt.Errorf("creating temporary directory to hold layer blobs: %w", err)
	}
	logrus.Debugf("using %q to hold temporary data", path)
	defer func() {
		if src == nil {
			err2 := os.RemoveAll(path)
			if err2 != nil {
				logrus.Errorf("error removing layer blob directory: %v", err2)
			}
		}
	}()

	// Build fresh copies of the configurations and manifest so that we don't mess with any
	// values in the Builder object itself.
	var mb manifestBuilder
	switch i.preferredManifestType {
	case v1.MediaTypeImageManifest:
		mb, err = i.newOCIManifestBuilder()
		if err != nil {
			return nil, err
		}
	case manifest.DockerV2Schema2MediaType:
		mb, err = i.newDockerSchema2ManifestBuilder()
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
			i.preferredManifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
	}

	// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
	var extraImageContentDiff string
	var extraImageContentDiffDigest digest.Digest
	blobLayers := make(map[digest.Digest]blobLayerInfo)
	for _, layerID := range layers {
		what := fmt.Sprintf("layer %q", layerID)
		if i.confidentialWorkload.Convert || i.squash {
			what = fmt.Sprintf("container %q", i.containerID)
		}
		if layerID == synthesizedLayerID {
			what = synthesizedLayerID
		}
		if apiLayerIDs[layerID] {
			what = layerID
		}
		// Look up this layer.
		var layerUncompressedDigest digest.Digest
		var layerUncompressedSize int64
		linkedLayerHasLayerID := func(l commitLinkedLayerInfo) bool { return l.layerID == layerID }
		if apiLayerIDs[layerID] {
			// API-provided prepended or appended layer
			apiLayerIndex := slices.IndexFunc(apiLayers, linkedLayerHasLayerID)
			layerUncompressedDigest = apiLayers[apiLayerIndex].uncompressedDigest
			layerUncompressedSize = apiLayers[apiLayerIndex].size
		} else if layerID == synthesizedLayerID {
			// layer diff consisting of extra files to synthesize into a layer
			diffFilename, digest, size, err := i.makeExtraImageContentDiff(true, nil)
			if err != nil {
				return nil, fmt.Errorf("unable to generate layer for additional content: %w", err)
			}
			extraImageContentDiff = diffFilename
			extraImageContentDiffDigest = digest
			layerUncompressedDigest = digest
			layerUncompressedSize = size
		} else {
			// "normal" layer
			layer, err := i.store.Layer(layerID)
			if err != nil {
				return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
			}
			layerID = layer.ID
			layerUncompressedDigest = layer.UncompressedDigest
			layerUncompressedSize = layer.UncompressedSize
		}
		// We already know the digest of the contents of parent layers,
		// so if this is a parent layer, and we know its digest, reuse
		// its blobsum, diff ID, and size.
		if !i.confidentialWorkload.Convert && !i.squash && parentLayerIDs[layerID] && layerUncompressedDigest != "" {
			layerBlobSum := layerUncompressedDigest
			layerBlobSize := layerUncompressedSize
			diffID := layerUncompressedDigest
			// Note this layer in the manifest, using the appropriate blobsum.
			mb.addLayer(layerBlobSum, layerBlobSize, diffID)
			blobLayers[diffID] = blobLayerInfo{
				ID:   layerID,
				Size: layerBlobSize,
			}
			continue
		}
		// Figure out if we need to change the media type, in case we've changed the compression.
		if err := mb.computeLayerMIMEType(what, i.compression); err != nil {
			return nil, err
		}
		// Start reading either the layer or the whole container rootfs.
		noCompression := archive.Uncompressed
		diffOptions := &storage.DiffOptions{
			Compression: &noCompression,
		}
		var rc io.ReadCloser
		var errChan chan error
		var layerExclusions []copier.ConditionalRemovePath
		if i.confidentialWorkload.Convert {
			// Convert the root filesystem into an encrypted disk image.
			rc, err = i.extractConfidentialWorkloadFS(i.confidentialWorkload)
			if err != nil {
				return nil, err
			}
		} else if i.squash {
			// Extract the root filesystem as a single layer.
			rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{})
			if err != nil {
				return nil, err
			}
		} else {
			if apiLayerIDs[layerID] {
				// We're reading an API-supplied blob.
				apiLayerIndex := slices.IndexFunc(apiLayers, linkedLayerHasLayerID)
				f, err := os.Open(apiLayers[apiLayerIndex].linkedLayer.BlobPath)
				if err != nil {
					return nil, fmt.Errorf("opening layer blob for %s: %w", layerID, err)
				}
				rc = f
			} else if layerID == synthesizedLayerID {
				// Slip in additional content as an additional layer.
				if rc, err = os.Open(extraImageContentDiff); err != nil {
					return nil, err
				}
			} else {
				// If we're up to the final layer, but we don't want to
				// include a diff for it, we're done.
				if i.emptyLayer && layerID == i.layerID {
					continue
				}
				if layerID == i.layerID {
					// We need to filter out any mount targets that we created.
					layerExclusions = append(slices.Clone(i.layerExclusions), i.layerMountTargets...)
					// And we _might_ need to filter out directories that we modified
					// by creating and removing mount targets, _if_ they were the
					// same in the base image for this stage.
					layerPullUps, err := i.filterExclusionsByImage(i.layerPullUps, i.fromImageID)
					if err != nil {
						return nil, fmt.Errorf("checking which exclusions are in base image %q: %w", i.fromImageID, err)
					}
					layerExclusions = append(layerExclusions, layerPullUps...)
				}
				// Extract this layer, one of possibly many.
				rc, err = i.store.Diff("", layerID, diffOptions)
				if err != nil {
					return nil, fmt.Errorf("extracting %s: %w", what, err)
				}
			}
		}
		srcHasher := digest.Canonical.Digester()
		// Set up to write the possibly-recompressed blob.
		layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0o600)
		if err != nil {
			rc.Close()
			return nil, fmt.Errorf("opening file for %s: %w", what, err)
		}

		counter := ioutils.NewWriteCounter(layerFile)
		var destHasher digest.Digester
		var multiWriter io.Writer
		// Avoid rehashing when we compress or mess with the layer contents somehow.
		// At this point, there are multiple ways that can happen.
		diffBeingAltered := i.compression != archive.Uncompressed
		diffBeingAltered = diffBeingAltered || i.layerModTime != nil || i.layerLatestModTime != nil
		diffBeingAltered = diffBeingAltered || len(layerExclusions) != 0
		if diffBeingAltered {
			destHasher = digest.Canonical.Digester()
			multiWriter = io.MultiWriter(counter, destHasher.Hash())
		} else {
			destHasher = srcHasher
			multiWriter = counter
		}
		// Compress the layer, if we're recompressing it.
		writeCloser, err := archive.CompressStream(multiWriter, i.compression)
		if err != nil {
			layerFile.Close()
			rc.Close()
			return nil, fmt.Errorf("compressing %s: %w", what, err)
		}
		writer := io.MultiWriter(writeCloser, srcHasher.Hash())

		// Use specified timestamps in the layer, if we're doing that for history
		// entries.
		nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
		writeCloser = makeFilteredLayerWriteCloser(nestedWriteCloser, i.layerModTime, i.layerLatestModTime, layerExclusions)
		writer = writeCloser
		// Okay, copy from the raw diff through the filter, compressor, and counter and
		// digesters.
		size, err := io.Copy(writer, rc)
		if err != nil {
			writeCloser.Close()
			layerFile.Close()
			rc.Close()
			return nil, fmt.Errorf("storing %s to file: on copy: %w", what, err)
		}
		if err := writeCloser.Close(); err != nil {
			layerFile.Close()
			rc.Close()
			return nil, fmt.Errorf("storing %s to file: on pipe close: %w", what, err)
		}
		if err := layerFile.Close(); err != nil {
			rc.Close()
			return nil, fmt.Errorf("storing %s to file: on file close: %w", what, err)
		}
		rc.Close()

		if errChan != nil {
			err = <-errChan
			if err != nil {
				return nil, fmt.Errorf("extracting container rootfs: %w", err)
			}
		}

		if err != nil {
			return nil, fmt.Errorf("storing %s to file: %w", what, err)
		}
		if diffBeingAltered {
			size = counter.Count
		} else {
			if size != counter.Count {
				return nil, fmt.Errorf("storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
			}
		}
		logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String())
		// Rename the layer so that we can more easily find it by digest later.
		finalBlobName := filepath.Join(path, destHasher.Digest().String())
		if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
			return nil, fmt.Errorf("storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
		}
		mb.addLayer(destHasher.Digest(), size, srcHasher.Digest())
	}

	// Only attempt to append history if history was not disabled explicitly.
	if !i.omitHistory {
		if err := mb.buildHistory(extraImageContentDiff, extraImageContentDiffDigest); err != nil {
			return nil, err
		}
	}

	imageManifest, config, err := mb.manifestAndConfig()
	if err != nil {
		return nil, err
	}
	src = &containerImageSource{
		path:          path,
		ref:           i,
		store:         i.store,
		containerID:   i.containerID,
		mountLabel:    i.mountLabel,
		layerID:       i.layerID,
		names:         i.names,
		compression:   i.compression,
		config:        config,
		configDigest:  digest.Canonical.FromBytes(config),
		manifest:      imageManifest,
		manifestType:  i.preferredManifestType,
		blobDirectory: i.blobDirectory,
		blobLayers:    blobLayers,
	}
	return src, nil
}

func (i *containerImageRef) NewImageDestination(_ context.Context, _ *types.SystemContext) (types.ImageDestination, error) {
	return nil, errors.New("can't write to a container")
}

func (i *containerImageRef) DockerReference() reference.Named {
	return i.name
}

func (i *containerImageRef) StringWithinTransport() string {
	if len(i.names) > 0 {
		return i.names[0]
	}
	return ""
}

func (i *containerImageRef) DeleteImage(context.Context, *types.SystemContext) error {
	// we were never here
	return nil
}

func (i *containerImageRef) PolicyConfigurationIdentity() string {
	return ""
}

func (i *containerImageRef) PolicyConfigurationNamespaces() []string {
	return nil
}

func (i *containerImageRef) Transport() types.ImageTransport {
	return is.Transport
}

func (i *containerImageSource) Close() error {
	err := os.RemoveAll(i.path)
	if err != nil {
		return fmt.Errorf("removing layer blob directory: %w", err)
	}
	return nil
}

func (i *containerImageSource) Reference() types.ImageReference {
	return i.ref
}

func (i *containerImageSource) GetSignatures(_ context.Context, _ *digest.Digest) ([][]byte, error) {
	return nil, nil
}

func (i *containerImageSource) GetManifest(_ context.Context, _ *digest.Digest) ([]byte, string, error) {
	return i.manifest, i.manifestType, nil
}

func (i *containerImageSource) LayerInfosForCopy(_ context.Context, _ *digest.Digest) ([]types.BlobInfo, error) {
	return nil, nil
}

func (i *containerImageSource) HasThreadSafeGetBlob() bool {
	return false
}

func (i *containerImageSource) GetBlob(_ context.Context, blob types.BlobInfo, _ types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
	if blob.Digest == i.configDigest {
		logrus.Debugf("start reading config")
		reader := bytes.NewReader(i.config)
		closer := func() error {
			logrus.Debugf("finished reading config")
			return nil
		}
		return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
	}
	var layerReadCloser io.ReadCloser
	size = -1
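	// Prefer serving a layer we already know about directly from the
	// storage library's diff; otherwise fall back to the blob files that
	// were written out at commit time, checking the caller-supplied blob
	// directory before our own temporary directory.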
	if blobLayerInfo, ok := i.blobLayers[blob.Digest]; ok {
		noCompression := archive.Uncompressed
		diffOptions := &storage.DiffOptions{
			Compression: &noCompression,
		}
		layerReadCloser, err = i.store.Diff("", blobLayerInfo.ID, diffOptions)
		size = blobLayerInfo.Size
	} else {
		for _, blobDir := range []string{i.blobDirectory, i.path} {
			var layerFile *os.File
			layerFile, err = os.OpenFile(filepath.Join(blobDir, blob.Digest.String()), os.O_RDONLY, 0o600)
			if err == nil {
				st, err := layerFile.Stat()
				if err != nil {
					logrus.Warnf("error reading size of layer file %q: %v", blob.Digest.String(), err)
				} else {
					size = st.Size()
					layerReadCloser = layerFile
					break
				}
				layerFile.Close()
			}
			if !errors.Is(err, os.ErrNotExist) {
				logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), blobDir, err)
			}
		}
	}
	if err != nil || layerReadCloser == nil || size == -1 {
		logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
		return nil, -1, fmt.Errorf("opening layer blob: %w", err)
	}
	logrus.Debugf("reading layer %q", blob.Digest.String())
	closer := func() error {
		logrus.Debugf("finished reading layer %q", blob.Digest.String())
		if err := layerReadCloser.Close(); err != nil {
			return fmt.Errorf("closing layer %q after reading: %w", blob.Digest.String(), err)
		}
		return nil
	}
	return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil
}

// makeExtraImageContentDiff creates an archive file containing the contents of
// files named in i.extraImageContent. The footer that marks the end of the
// archive may be omitted.
func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool, timestamp *time.Time) (_ string, _ digest.Digest, _ int64, retErr error) {
	cdir, err := i.store.ContainerDirectory(i.containerID)
	if err != nil {
		return "", "", -1, err
	}
	diff, err := os.CreateTemp(cdir, "extradiff")
	if err != nil {
		return "", "", -1, err
	}
	defer diff.Close()
	defer func() {
		if retErr != nil {
			os.Remove(diff.Name())
		}
	}()
	digester := digest.Canonical.Digester()
	counter := ioutils.NewWriteCounter(digester.Hash())
	tw := tar.NewWriter(io.MultiWriter(diff, counter))
	if timestamp == nil {
		now := time.Now()
		timestamp = &now
		if i.created != nil {
			timestamp = i.created
		}
	}
	for path, contents := range i.extraImageContent {
		if err := func() error {
			content, err := os.Open(contents)
			if err != nil {
				return err
			}
			defer content.Close()
			st, err := content.Stat()
			if err != nil {
				return err
			}
			if err := tw.WriteHeader(&tar.Header{
				Name:     path,
				Typeflag: tar.TypeReg,
				Mode:     0o644,
				ModTime:  *timestamp,
				Size:     st.Size(),
			}); err != nil {
				return fmt.Errorf("writing header for %q: %w", path, err)
			}
			if _, err := io.Copy(tw, content); err != nil {
				return fmt.Errorf("writing content for %q: %w", path, err)
			}
			if err := tw.Flush(); err != nil {
				return fmt.Errorf("flushing content for %q: %w", path, err)
			}
			return nil
		}(); err != nil {
			return "", "", -1, fmt.Errorf("writing %q to prepend-to-archive blob: %w", contents, err)
		}
	}
	if includeFooter {
		if err = tw.Close(); err != nil {
			return "", "", -1, fmt.Errorf("closing prepend-to-archive blob after final write: %w", err)
		}
	} else {
		if err = tw.Flush(); err != nil {
			return "", "", -1, fmt.Errorf("flushing prepend-to-archive blob after final write: %w", err)
		}
	}
	return diff.Name(), digester.Digest(), counter.Count, nil
}
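
// A hedged sketch of why the footer can be skipped: a tar stream's
// end-of-archive marker is two zeroed 512-byte blocks, so a footerless diff
// can be concatenated in front of another uncompressed tar stream without
// terminating readers early. The variable names below are illustrative, not
// part of this file:
//
//	extraPath, _, _, err := i.makeExtraImageContentDiff(false, nil)
//	// ... handle err, open extraPath as extraFile ...
//	combined := io.MultiReader(extraFile, layerDiff)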

// makeFilteredLayerWriteCloser returns either the passed-in WriteCloser, or,
// if layerModTime, layerLatestModTime, or exclusions are set, a WriteCloser
// which modifies the tarball that's written to it so that entries matching
// the exclusions are dropped, and timestamps in headers are set to
// layerModTime exactly (if a value is provided for it), and then clamped to
// be no later than layerLatestModTime (if a value is provided for it).
// This implies that if both values are provided, the archive's timestamps
// will be set to the earlier of the two values.
func makeFilteredLayerWriteCloser(wc io.WriteCloser, layerModTime, layerLatestModTime *time.Time, exclusions []copier.ConditionalRemovePath) io.WriteCloser {
	if layerModTime == nil && layerLatestModTime == nil && len(exclusions) == 0 {
		return wc
	}
	exclusionsMap := make(map[string]copier.ConditionalRemovePath)
	for _, exclusionSpec := range exclusions {
		pathSpec := strings.Trim(path.Clean(exclusionSpec.Path), "/")
		if pathSpec == "" {
			continue
		}
		exclusionsMap[pathSpec] = exclusionSpec
	}
	wc = newTarFilterer(wc, func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader) {
		// Changing a zeroed field to a non-zero field can affect the
		// format that the library uses for writing the header, so only
		// change fields that are already set to avoid changing the
		// format (and as a result, changing the length) of the header
		// that we write.
		modTime := hdr.ModTime
		nameSpec := strings.Trim(path.Clean(hdr.Name), "/")
		if conditions, ok := exclusionsMap[nameSpec]; ok {
			if (conditions.ModTime == nil || conditions.ModTime.Equal(modTime)) &&
				(conditions.Owner == nil || (conditions.Owner.UID == hdr.Uid && conditions.Owner.GID == hdr.Gid)) &&
				(conditions.Mode == nil || (*conditions.Mode&os.ModePerm == os.FileMode(hdr.Mode)&os.ModePerm)) {
				return true, false, nil
			}
		}
		if layerModTime != nil {
			modTime = *layerModTime
		}
		if layerLatestModTime != nil && layerLatestModTime.Before(modTime) {
			modTime = *layerLatestModTime
		}
		if !hdr.ModTime.IsZero() {
			hdr.ModTime = modTime
		}
		if !hdr.AccessTime.IsZero() {
			hdr.AccessTime = modTime
		}
		if !hdr.ChangeTime.IsZero() {
			hdr.ChangeTime = modTime
		}
		return false, false, nil
	})
	return wc
}
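
// A minimal usage sketch (assumptions: "out" is any io.WriteCloser and
// "tarball" is any io.Reader producing a tar stream; neither is defined in
// this file):
//
//	epoch := time.Unix(0, 0).UTC()
//	fwc := makeFilteredLayerWriteCloser(out, nil, &epoch, nil)
//	// Copying through fwc clamps every already-set header timestamp to be
//	// no later than the epoch.
//	if _, err := io.Copy(fwc, tarball); err != nil { /* handle */ }
//	fwc.Close()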

// makeLinkedLayerInfos calculates the size and digest information for a layer
// we intend to add to the image that we're committing.
func (b *Builder) makeLinkedLayerInfos(layers []LinkedLayer, layerType string, layerModTime, layerLatestModTime *time.Time) ([]commitLinkedLayerInfo, error) {
	if layers == nil {
		return nil, nil
	}
	infos := make([]commitLinkedLayerInfo, 0, len(layers))
	for i, layer := range layers {
		// complain if EmptyLayer and "is the BlobPath empty" don't agree
		if layer.History.EmptyLayer != (layer.BlobPath == "") {
			return nil, fmt.Errorf("internal error: layer-is-empty = %v, but content path is %q", layer.History.EmptyLayer, layer.BlobPath)
		}
		// if there's no layer contents, we're done with this one
		if layer.History.EmptyLayer {
			continue
		}
		// check if it's a directory or a non-directory
		st, err := os.Stat(layer.BlobPath)
		if err != nil {
			return nil, fmt.Errorf("checking if layer content %s is a directory: %w", layer.BlobPath, err)
		}
		info := commitLinkedLayerInfo{
			layerID:     fmt.Sprintf("(%s %d)", layerType, i+1),
			linkedLayer: layer,
		}
		if err = func() error {
			cdir, err := b.store.ContainerDirectory(b.ContainerID)
			if err != nil {
				return fmt.Errorf("determining directory for working container: %w", err)
			}
			f, err := os.CreateTemp(cdir, "")
			if err != nil {
				return fmt.Errorf("creating temporary file to hold blob for %q: %w", info.linkedLayer.BlobPath, err)
			}
			defer f.Close()
			var rc io.ReadCloser
			var what string
			if st.IsDir() {
				// if it's a directory, archive it and digest the archive while we're storing a copy somewhere
				what = "directory"
				rc, err = chrootarchive.Tar(info.linkedLayer.BlobPath, nil, info.linkedLayer.BlobPath)
				if err != nil {
					return fmt.Errorf("generating a layer blob from %q: %w", info.linkedLayer.BlobPath, err)
				}
			} else {
				what = "file"
				// if it's not a directory, just digest it while we're storing a copy somewhere
				rc, err = os.Open(info.linkedLayer.BlobPath)
				if err != nil {
					return err
				}
			}

			digester := digest.Canonical.Digester()
			sizeCountedFile := ioutils.NewWriteCounter(io.MultiWriter(digester.Hash(), f))
			wc := makeFilteredLayerWriteCloser(ioutils.NopWriteCloser(sizeCountedFile), layerModTime, layerLatestModTime, nil)
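			// The nil exclusions above are deliberate: linked layers
			// only have their timestamps rewritten or clamped; no
			// entries are removed from them.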
			_, copyErr := io.Copy(wc, rc)
			wcErr := wc.Close()
			if err := rc.Close(); err != nil {
				return fmt.Errorf("storing a copy of %s %q: closing reader: %w", what, info.linkedLayer.BlobPath, err)
			}
			if copyErr != nil {
				return fmt.Errorf("storing a copy of %s %q: copying data: %w", what, info.linkedLayer.BlobPath, copyErr)
			}
			if wcErr != nil {
				return fmt.Errorf("storing a copy of %s %q: closing writer: %w", what, info.linkedLayer.BlobPath, wcErr)
			}
			info.uncompressedDigest = digester.Digest()
			info.size = sizeCountedFile.Count
			info.linkedLayer.BlobPath = f.Name()
			return nil
		}(); err != nil {
			return nil, err
		}
		infos = append(infos, info)
	}
	return infos, nil
}

// makeContainerImageRef creates a containers/image/v5/types.ImageReference,
// mainly used to represent the working container as a source image that can
// be copied; copying it is how we commit the container to create the image.
func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) {
	if (len(options.PrependedLinkedLayers) > 0 || len(options.AppendedLinkedLayers) > 0) &&
		(options.ConfidentialWorkloadOptions.Convert || options.Squash) {
		return nil, errors.New("can't add prebuilt layers and produce an image with only one layer, at the same time")
	}
	var name reference.Named
	container, err := b.store.Container(b.ContainerID)
	if err != nil {
		return nil, fmt.Errorf("locating container %q: %w", b.ContainerID, err)
	}
	if len(container.Names) > 0 {
		if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
			name = parsed
		}
	}

	cdir, err := b.store.ContainerDirectory(b.ContainerID)
	if err != nil {
		return nil, fmt.Errorf("getting the per-container data directory for %q: %w", b.ContainerID, err)
	}
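
	// Exclusion lists are stored as JSON-encoded
	// []copier.ConditionalRemovePath files in per-purpose subdirectories
	// of the container's data directory; names containing
	// containerExcludesSubstring are skipped as CreateTemp() leftovers.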
	gatherExclusions := func(excludesFiles []string) ([]copier.ConditionalRemovePath, error) {
		var excludes []copier.ConditionalRemovePath
		for _, excludesFile := range excludesFiles {
			if strings.Contains(excludesFile, containerExcludesSubstring) {
				continue
			}
			excludesData, err := os.ReadFile(excludesFile)
			if err != nil {
				return nil, fmt.Errorf("reading commit exclusions for %q: %w", b.ContainerID, err)
			}
			var theseExcludes []copier.ConditionalRemovePath
			if err := json.Unmarshal(excludesData, &theseExcludes); err != nil {
				return nil, fmt.Errorf("parsing commit exclusions for %q: %w", b.ContainerID, err)
			}
			excludes = append(excludes, theseExcludes...)
		}
		return excludes, nil
	}
	mountTargetFiles, err := filepath.Glob(filepath.Join(cdir, containerExcludesDir, "*"))
	if err != nil {
		return nil, fmt.Errorf("checking for commit exclusions for %q: %w", b.ContainerID, err)
	}
	pulledUpFiles, err := filepath.Glob(filepath.Join(cdir, containerPulledUpDir, "*"))
	if err != nil {
		return nil, fmt.Errorf("checking for commit pulled-up items for %q: %w", b.ContainerID, err)
	}
	layerMountTargets, err := gatherExclusions(mountTargetFiles)
	if err != nil {
		return nil, err
	}
	if len(layerMountTargets) > 0 {
		logrus.Debugf("these items were created for use as mount targets: %#v", layerMountTargets)
	}
	layerPullUps, err := gatherExclusions(pulledUpFiles)
	if err != nil {
		return nil, err
	}
	if len(layerPullUps) > 0 {
		logrus.Debugf("these items appear to have been pulled up: %#v", layerPullUps)
	}
	var layerExclusions []copier.ConditionalRemovePath
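	// Clone the package-level compatibility list rather than aliasing it,
	// so nothing downstream can modify compatLayerExclusions through this
	// slice.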
	if options.CompatLayerOmissions == types.OptionalBoolTrue {
		layerExclusions = slices.Clone(compatLayerExclusions)
	}
	if len(layerExclusions) > 0 {
		logrus.Debugf("excluding these items from committed layer: %#v", layerExclusions)
	}

	manifestType := options.PreferredManifestType
	if manifestType == "" {
		manifestType = define.OCIv1ImageManifest
	}

	for _, u := range options.UnsetEnvs {
		b.UnsetEnv(u)
	}
	oconfig, err := json.Marshal(&b.OCIv1)
	if err != nil {
		return nil, fmt.Errorf("encoding OCI-format image configuration %#v: %w", b.OCIv1, err)
	}
	dconfig, err := json.Marshal(&b.Docker)
	if err != nil {
		return nil, fmt.Errorf("encoding docker-format image configuration %#v: %w", b.Docker, err)
	}
	var created, layerModTime, layerLatestModTime *time.Time
	if options.HistoryTimestamp != nil {
		historyTimestampUTC := options.HistoryTimestamp.UTC()
		created = &historyTimestampUTC
		layerModTime = &historyTimestampUTC
	}
	if options.SourceDateEpoch != nil {
		sourceDateEpochUTC := options.SourceDateEpoch.UTC()
		created = &sourceDateEpochUTC
		if options.RewriteTimestamp {
			layerLatestModTime = &sourceDateEpochUTC
		}
	}
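	// To summarize: HistoryTimestamp pins both the history entries and the
	// layer entries' timestamps to one value, while SourceDateEpoch
	// overrides the image's creation time and, only when RewriteTimestamp
	// is also set, clamps layer timestamps so that none is later than the
	// epoch.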
	createdBy := b.CreatedBy()
	if createdBy == "" {
		createdBy = strings.Join(b.Shell(), " ")
		if createdBy == "" {
			createdBy = "/bin/sh"
		}
	}

	parent := ""
	forceOmitHistory := false
	if b.FromImageID != "" {
		parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
		if parentDigest.Validate() == nil {
			parent = parentDigest.String()
		}
		if !options.OmitHistory && len(b.OCIv1.History) == 0 && len(b.OCIv1.RootFS.DiffIDs) != 0 {
			// Parent had layers, but no history. We shouldn't confuse
			// our own confidence checks by adding history for layers
			// that we're adding, creating an image with multiple layers,
			// only some of which have history entries, which would be
			// broken in confusing ways.
			b.Logger.Debugf("parent image %q had no history but had %d layers, assuming OmitHistory", b.FromImageID, len(b.OCIv1.RootFS.DiffIDs))
			forceOmitHistory = true
		}
	}

	preLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(b.PrependedLinkedLayers), slices.Clone(options.PrependedLinkedLayers)...), "prepended layer", layerModTime, layerLatestModTime)
	if err != nil {
		return nil, err
	}
	postLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(options.AppendedLinkedLayers), slices.Clone(b.AppendedLinkedLayers)...), "appended layer", layerModTime, layerLatestModTime)
	if err != nil {
		return nil, err
	}

	ref := &containerImageRef{
		fromImageName:         b.FromImage,
		fromImageID:           b.FromImageID,
		store:                 b.store,
		compression:           options.Compression,
		name:                  name,
		names:                 container.Names,
		containerID:           container.ID,
		mountLabel:            b.MountLabel,
		layerID:               container.LayerID,
		oconfig:               oconfig,
		dconfig:               dconfig,
		created:               created,
		createdBy:             createdBy,
		layerModTime:          layerModTime,
		layerLatestModTime:    layerLatestModTime,
		historyComment:        b.HistoryComment(),
		annotations:           b.Annotations(),
		setAnnotations:        slices.Clone(options.Annotations),
		unsetAnnotations:      slices.Clone(options.UnsetAnnotations),
		preferredManifestType: manifestType,
		squash:                options.Squash,
		confidentialWorkload:  options.ConfidentialWorkloadOptions,
		omitHistory:           options.OmitHistory || forceOmitHistory,
		emptyLayer:            (options.EmptyLayer || options.OmitLayerHistoryEntry) && !options.Squash && !options.ConfidentialWorkloadOptions.Convert,
		omitLayerHistoryEntry: options.OmitLayerHistoryEntry && !options.Squash && !options.ConfidentialWorkloadOptions.Convert,
		idMappingOptions:      &b.IDMappingOptions,
		parent:                parent,
		blobDirectory:         options.BlobDirectory,
		preEmptyLayers:        slices.Clone(b.PrependedEmptyLayers),
		preLayers:             preLayerInfos,
		postEmptyLayers:       slices.Clone(b.AppendedEmptyLayers),
		postLayers:            postLayerInfos,
		overrideChanges:       options.OverrideChanges,
		overrideConfig:        options.OverrideConfig,
		extraImageContent:     maps.Clone(options.ExtraImageContent),
		compatSetParent:       options.CompatSetParent,
		layerExclusions:       layerExclusions,
		layerMountTargets:     layerMountTargets,
		layerPullUps:          layerPullUps,
		createdAnnotation:     options.CreatedAnnotation,
	}
	if ref.created != nil {
		for i := range ref.preEmptyLayers {
			ref.preEmptyLayers[i].Created = ref.created
		}
		for i := range ref.preLayers {
			ref.preLayers[i].linkedLayer.History.Created = ref.created
		}
		for i := range ref.postEmptyLayers {
			ref.postEmptyLayers[i].Created = ref.created
		}
		for i := range ref.postLayers {
			ref.postLayers[i].linkedLayer.History.Created = ref.created
		}
	}
	return ref, nil
}

// Extract the container's whole filesystem as if it were a single layer, from
// the current builder instance.
func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
	src, err := b.makeContainerImageRef(options)
	if err != nil {
		return nil, nil, fmt.Errorf("creating image reference for container %q to extract its contents: %w", b.ContainerID, err)
	}
	return src.extractRootfs(opts)
}
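
// A hedged usage sketch for ExtractRootfs (error handling abbreviated, and
// "dst" is any io.Writer defined by the caller, not by this file):
//
//	rc, errCh, err := b.ExtractRootfs(CommitOptions{}, ExtractRootfsOptions{})
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	if _, err := io.Copy(dst, rc); err != nil {
//		return err
//	}
//	// The extraction goroutine reports its result on errCh once the
//	// stream has been fully consumed.
//	if err := <-errCh; err != nil {
//		return err
//	}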