Merge pull request #6332 from nalind/exclude-pull-ups-take-three

Rework how we decide what to filter out of layer diffs
openshift-merge-bot[bot] 2025-08-13 15:44:31 +00:00 committed by GitHub
commit 3ca907f7e4
10 changed files with 234 additions and 100 deletions


@@ -201,7 +201,7 @@ func (req *request) UIDMap() []idtools.IDMap {
case requestEval:
return nil
case requestStat:
return nil
return req.StatOptions.UIDMap
case requestGet:
return req.GetOptions.UIDMap
case requestPut:
@@ -226,7 +226,7 @@ func (req *request) GIDMap() []idtools.IDMap {
case requestEval:
return nil
case requestStat:
return nil
return req.StatOptions.GIDMap
case requestGet:
return req.GetOptions.GIDMap
case requestPut:
@@ -284,6 +284,7 @@ type StatForItem struct {
Size int64 // dereferenced value for symlinks
Mode os.FileMode // dereferenced value for symlinks
ModTime time.Time // dereferenced value for symlinks
UID, GID int64 // usually in the uint32 range, set to -1 if unknown
IsSymlink bool
IsDir bool // dereferenced value for symlinks
IsRegular bool // dereferenced value for symlinks
@@ -342,6 +343,7 @@ func Eval(root string, directory string, _ EvalOptions) (string, error) {
// StatOptions controls parts of Stat()'s behavior.
type StatOptions struct {
UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs when returning results
CheckForArchives bool // check for and populate the IsArchive bit in returned values
Excludes []string // contents to pretend don't exist, using the OS-specific path separator
}
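With the new StatOptions fields, a caller can ask Stat() to report ownership in a user namespace's terms. A minimal sketch, assuming a buildah checkout on the import path; the rootfs path and map values are illustrative, not from this PR:

```
package main

import (
	"fmt"

	"github.com/containers/buildah/copier"
	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Hypothetical mapping: host IDs 100000-165535 back container IDs 0-65535.
	idmap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	options := copier.StatOptions{
		UIDMap: idmap, // translate host IDs to container IDs in the results
		GIDMap: idmap,
	}
	stats, err := copier.Stat("/path/to/rootfs", "/path/to/rootfs", options, []string{"*"})
	if err != nil {
		panic(err)
	}
	for _, glob := range stats {
		for name, item := range glob.Results {
			// UID/GID are -1 when ownership could not be determined.
			fmt.Printf("%s: uid=%d gid=%d\n", name, item.UID, item.GID)
		}
	}
}
```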
@@ -975,7 +977,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
resp := copierHandlerEval(req)
return resp, nil, nil
case requestStat:
resp := copierHandlerStat(req, pm)
resp := copierHandlerStat(req, pm, idMappings)
return resp, nil, nil
case requestGet:
return copierHandlerGet(bulkWriter, req, pm, idMappings)
@@ -1102,7 +1104,7 @@ func copierHandlerEval(req request) *response {
return &response{Eval: evalResponse{Evaluated: filepath.Join(req.rootPrefix, resolvedTarget)}}
}
func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
func copierHandlerStat(req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) *response {
errorResponse := func(fmtspec string, args ...any) *response {
return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse{}}
}
@@ -1160,6 +1162,17 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
}
result.Size = linfo.Size()
result.Mode = linfo.Mode()
result.UID, result.GID = -1, -1
if uid, gid, err := owner(linfo); err == nil {
if idMappings != nil && !idMappings.Empty() {
hostPair := idtools.IDPair{UID: uid, GID: gid}
uid, gid, err = idMappings.ToContainer(hostPair)
if err != nil {
return errorResponse("copier: stat: mapping host filesystem owners %#v to container filesystem owners: %w", hostPair, err)
}
}
result.UID, result.GID = int64(uid), int64(gid)
}
result.ModTime = linfo.ModTime()
result.IsDir = linfo.IsDir()
result.IsRegular = result.Mode.IsRegular()
@@ -1272,7 +1285,7 @@ func checkLinks(item string, req request, info os.FileInfo) (string, os.FileInfo
func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
statRequest := req
statRequest.Request = requestStat
statResponse := copierHandlerStat(req, pm)
statResponse := copierHandlerStat(req, pm, idMappings)
errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) {
return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse.Stat, Get: getResponse{}}, nil, nil
}
@@ -1591,6 +1604,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if name != "" {
hdr.Name = filepath.ToSlash(name)
}
hdr.Uname, hdr.Gname = "", ""
if options.Rename != nil {
hdr.Name = handleRename(options.Rename, hdr.Name)
}
@@ -1698,6 +1712,9 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if options.ChmodDirs != nil {
hdr.Mode = int64(*options.ChmodDirs)
}
if !strings.HasSuffix(hdr.Name, "/") {
hdr.Name += "/"
}
} else {
if options.ChownFiles != nil {
hdr.Uid, hdr.Gid = options.ChownFiles.UID, options.ChownFiles.GID
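The appended "/" enforces the tar convention that directory entries carry a trailing slash in their header names, which is what the updated test expectations below now check for. A minimal, self-contained sketch of the same normalization using only the standard library:

```
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"strings"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{Name: "subdir", Typeflag: tar.TypeDir, Mode: 0o700}
	// Same normalization as above: directory names end in "/".
	if !strings.HasSuffix(hdr.Name, "/") {
		hdr.Name += "/"
	}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	tw.Close()
	th, _ := tar.NewReader(&buf).Next()
	fmt.Println(th.Name) // prints "subdir/"
}
```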
@@ -2266,7 +2283,7 @@ type EnsurePath struct {
// EnsureOptions controls parts of Ensure()'s behavior.
type EnsureOptions struct {
UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the chroot
UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs in the chroot
Paths []EnsurePath
}
@@ -2433,7 +2450,7 @@ type ConditionalRemovePath struct {
// ConditionalRemoveOptions controls parts of ConditionalRemove()'s behavior.
type ConditionalRemoveOptions struct {
UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the chroot
UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs in the chroot
Paths []ConditionalRemovePath
}
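The two comment corrections above fix the documented direction of the mapping: Ensure() and ConditionalRemove() translate container IDs to host IDs before touching the filesystem, the opposite of what Stat() now does when reporting ownership. A minimal sketch of both directions as github.com/containers/storage/pkg/idtools exposes them, with illustrative map values:

```
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	idmap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	mappings := idtools.NewIDMappingsFromMaps(idmap, idmap)
	// Host-to-container, the direction Stat() uses for StatForItem.UID/GID:
	uid, gid, _ := mappings.ToContainer(idtools.IDPair{UID: 100123, GID: 100123})
	fmt.Println(uid, gid) // 123 123
	// Container-to-host, the direction Ensure()/ConditionalRemove() use:
	hostPair, _ := mappings.ToHost(idtools.IDPair{UID: 123, GID: 123})
	fmt.Println(hostPair) // {100123 100123}
}
```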


@@ -184,7 +184,7 @@ func TestGetNoCrossDevice(t *testing.T) {
tr := tar.NewReader(&buf)
th, err := tr.Next() // should be the "subdir" directory
require.NoError(t, err, "error reading first entry archived")
assert.Equal(t, "subdir", th.Name, `first entry in archive was not named "subdir"`)
assert.Equal(t, "subdir/", th.Name, `first entry in archive was not named "subdir/"`)
th, err = tr.Next()
assert.Error(t, err, "should not have gotten a second entry in archive")


@@ -716,6 +716,7 @@ func testStat(t *testing.T) {
if actualContent, ok := testArchive.contents[testItem.Name]; ok {
testItem.Size = int64(len(actualContent))
}
checkStatInfoOwnership(t, result)
require.Equal(t, testItem.Size, result.Size, "unexpected size difference for %q", name)
require.True(t, result.IsRegular, "expected %q.IsRegular to be true", glob)
require.False(t, result.IsDir, "expected %q.IsDir to be false", glob)
@@ -912,22 +913,22 @@ func testGetMultiple(t *testing.T) {
{Name: "non-archive-a", Typeflag: tar.TypeReg, Size: 1199, Mode: 0o600},
{Name: "hlink-0", Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0o600},
{Name: "something-a", Typeflag: tar.TypeReg, Size: 34, Mode: 0o600},
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-a/", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-a/file-n", Typeflag: tar.TypeReg, Size: 108, Mode: 0o660},
{Name: "subdir-a/file-o", Typeflag: tar.TypeReg, Size: 45, Mode: 0o660},
{Name: "subdir-a/file-a", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0o600},
{Name: "subdir-a/file-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0o600},
{Name: "subdir-a/file-c", Typeflag: tar.TypeReg, Size: 56, Mode: 0o600},
{Name: "subdir-b", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-b/", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-b/file-n", Typeflag: tar.TypeReg, Size: 216, Mode: 0o660},
{Name: "subdir-b/file-o", Typeflag: tar.TypeReg, Size: 67, Mode: 0o660},
{Name: "subdir-c", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-c/", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-c/file-p", Typeflag: tar.TypeReg, Size: 432, Mode: 0o666},
{Name: "subdir-c/file-q", Typeflag: tar.TypeReg, Size: 78, Mode: 0o666},
{Name: "subdir-d", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-d/", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-d/hlink-0", Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0o600},
{Name: "subdir-e", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-e/subdir-f", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-e/", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-e/subdir-f/", Typeflag: tar.TypeDir, Mode: 0o700},
{Name: "subdir-e/subdir-f/hlink-b", Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0o600},
},
contents: map[string][]byte{
@@ -952,22 +953,22 @@
"something-a",
"archive-a",
"non-archive-a",
"subdir-a",
"subdir-a/",
"subdir-a/file-n",
"subdir-a/file-o",
"subdir-a/file-a",
"subdir-a/file-b",
"subdir-a/file-c",
"subdir-b",
"subdir-b/",
"subdir-b/file-n",
"subdir-b/file-o",
"subdir-c",
"subdir-c/",
"subdir-c/file-p",
"subdir-c/file-q",
"subdir-d",
"subdir-d/",
"subdir-d/hlink-0",
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/",
"subdir-e/subdir-f/",
"subdir-e/subdir-f/hlink-b",
},
},
@@ -995,7 +996,7 @@
"file-q", // from subdir-c
"file-q", // from link-c -> subdir-c
"hlink-0", // from subdir-d
"subdir-f", // from subdir-e
"subdir-f/", // from subdir-e
"subdir-f/hlink-b", // from subdir-e
},
},
@@ -1019,16 +1020,16 @@
"link-c",
"hlink-0",
// "subdir-a/file-c", // strings.HasPrefix("**/*-c", "subdir-a/") is false
"subdir-b",
"subdir-b/",
"subdir-b/file-n",
"subdir-b/file-o",
"subdir-c",
"subdir-c/",
"subdir-c/file-p",
"subdir-c/file-q",
"subdir-d",
"subdir-d/",
"subdir-d/hlink-0",
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/",
"subdir-e/subdir-f/",
"subdir-e/subdir-f/hlink-b",
},
},
@@ -1048,7 +1049,7 @@
"file-q", // from link-c -> subdir-c
"hlink-0",
"hlink-0",
"subdir-f",
"subdir-f/",
"subdir-f/hlink-b",
},
},
@@ -1066,22 +1067,22 @@
"something-a",
"archive-a",
"non-archive-a",
"subdir-a",
"subdir-a/",
"subdir-a/file-a",
"subdir-a/file-b",
"subdir-a/file-c",
"subdir-a/file-n",
"subdir-a/file-o",
"subdir-b",
"subdir-b/",
"subdir-b/file-n",
"subdir-b/file-o",
"subdir-c",
"subdir-c/",
"subdir-c/file-p",
"subdir-c/file-q",
"subdir-d",
"subdir-d/",
"subdir-d/hlink-0",
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/",
"subdir-e/subdir-f/",
"subdir-e/subdir-f/hlink-b",
},
},
@@ -1110,7 +1111,7 @@
"something-a",
"archive-a",
"non-archive-a",
"subdir-f",
"subdir-f/",
"subdir-f/hlink-b",
},
},
@@ -1133,7 +1134,7 @@
items: []string{
// "subdir-a/file-c", // strings.HasPrefix("**/*-c", "subdir-a/") is false
"link-c",
"subdir-c",
"subdir-c/",
"subdir-c/file-p",
"subdir-c/file-q",
},
@@ -1241,7 +1242,7 @@
name: "subdirectory",
pattern: "subdir-e",
items: []string{
"subdir-f",
"subdir-f/",
"subdir-f/hlink-b",
},
},
@@ -1275,7 +1276,7 @@
name: "subdir-without-name",
pattern: "subdir-e",
items: []string{
"subdir-f",
"subdir-f/",
"subdir-f/hlink-b",
},
},
@@ -1284,8 +1285,8 @@
pattern: "subdir-e",
keepDirectoryNames: true,
items: []string{
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/",
"subdir-e/subdir-f/",
"subdir-e/subdir-f/hlink-b",
},
},
@@ -1336,7 +1337,7 @@
"archive-a",
"non-archive-a",
"something-a",
"subdir-b",
"subdir-b/",
"subdir-b/file-n",
"subdir-b/file-o",
"subdir-b/file-a",
@@ -1391,11 +1392,11 @@
"something-a",
"archive-a",
"non-archive-a",
"subdir-a",
"subdir-b",
"subdir-c",
"subdir-d",
"subdir-e",
"subdir-a/",
"subdir-b/",
"subdir-c/",
"subdir-d/",
"subdir-e/",
"subdir-a/file-n",
"subdir-a/file-o",
"subdir-a/file-a",
@@ -1408,7 +1409,7 @@
"subdir-c/file-q",
"subdir-c/file-q",
"subdir-d/hlink-0",
"subdir-e/subdir-f",
"subdir-e/subdir-f/",
"subdir-e/subdir-f/hlink-b",
},
},
@@ -1420,11 +1421,11 @@
items: []string{
"file-0",
"file-b",
"subdir-a",
"subdir-b",
"subdir-c",
"subdir-d",
"subdir-e",
"subdir-a/",
"subdir-b/",
"subdir-c/",
"subdir-d/",
"subdir-e/",
"subdir-a/file-c",
"subdir-b/file-n",
"subdir-b/file-o",
@ -1434,7 +1435,7 @@ func testGetMultiple(t *testing.T) {
"subdir-c/file-q",
"hlink-0",
"subdir-d/hlink-0",
"subdir-e/subdir-f",
"subdir-e/subdir-f/",
"subdir-e/subdir-f/hlink-b",
},
},
@@ -1448,7 +1449,7 @@
"something-a",
"archive-a",
"non-archive-a",
"subdir-a",
"subdir-a/",
"subdir-a/file-n",
"subdir-a/file-o",
"subdir-a/file-a",
@@ -1461,7 +1462,7 @@
pattern: "/subdir-b/*",
parents: true,
items: []string{
"subdir-b",
"subdir-b/",
"subdir-b/file-n",
"subdir-b/file-o",
},
@@ -1471,18 +1472,18 @@
pattern: "../../subdir-b/*",
parents: true,
items: []string{
"subdir-b",
"subdir-b/",
"subdir-b/file-n",
"subdir-b/file-o",
},
},
{
name: "dir-with-parents",
pattern: "subdir-e/subdir-f",
pattern: "subdir-e/subdir-f/",
parents: true,
items: []string{
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/",
"subdir-e/subdir-f/",
"subdir-e/subdir-f/hlink-b",
},
},
@@ -1491,8 +1492,8 @@
pattern: "subdir-e/subdir-f/hlink-b",
parents: true,
items: []string{
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/",
"subdir-e/subdir-f/",
"subdir-e/subdir-f/hlink-b",
},
},
@@ -1555,11 +1556,16 @@
tr := tar.NewReader(pipeReader)
hdr, err := tr.Next()
actualContents := []string{}
for err == nil {
for hdr != nil {
actualContents = append(actualContents, filepath.FromSlash(hdr.Name))
assert.Equal(t, "", hdr.Uname, "expected user name field to be cleared")
assert.Equal(t, "", hdr.Gname, "expected group name field to be cleared")
if testCase.timestamp != nil {
assert.Truef(t, testCase.timestamp.Equal(hdr.ModTime), "timestamp was supposed to be forced for %q", hdr.Name)
}
if err != nil {
break
}
hdr, err = tr.Next()
}
pipeReader.Close()


@@ -5,6 +5,8 @@ package copier
import (
"os"
"testing"
"github.com/stretchr/testify/require"
)
const (
@@ -101,3 +103,9 @@ func TestConditionalRemoveChroot(t *testing.T) {
testConditionalRemove(t)
canChroot = couldChroot
}
func checkStatInfoOwnership(t *testing.T, result *StatForItem) {
t.Helper()
require.EqualValues(t, 0, result.UID, "expected the owning user to be reported")
require.EqualValues(t, 0, result.GID, "expected the owning group to be reported")
}


@@ -2,7 +2,14 @@
package copier
import (
"testing"
"github.com/stretchr/testify/require"
)
const (
testModeMask = int64(0o600)
testIgnoreSymlinkDates = true
)
func checkStatInfoOwnership(t *testing.T, result *StatForItem) {
t.Helper()
require.EqualValues(t, -1, result.UID, "expected the owning user to not be supported")
require.EqualValues(t, -1, result.GID, "expected the owning group to not be supported")
}

image.go

@@ -107,6 +107,8 @@ type containerImageRef struct {
extraImageContent map[string]string
compatSetParent types.OptionalBool
layerExclusions []copier.ConditionalRemovePath
layerMountTargets []copier.ConditionalRemovePath
layerPullUps []copier.EnsureParentPath
unsetAnnotations []string
setAnnotations []string
createdAnnotation types.OptionalBool
@@ -784,6 +786,55 @@ func (mb *ociManifestBuilder) manifestAndConfig() ([]byte, []byte, error) {
return omanifestbytes, oconfig, nil
}
// filterExclusionsByImage returns a slice of the members of "exclusions" which are present in the image with the specified ID
func (i containerImageRef) filterExclusionsByImage(exclusions []copier.EnsureParentPath, imageID string) ([]copier.EnsureParentPath, error) {
if len(exclusions) == 0 || imageID == "" {
return nil, nil
}
var paths []copier.EnsureParentPath
mountPoint, err := i.store.MountImage(imageID, nil, i.mountLabel)
if err != nil {
return nil, err
}
defer func() {
if _, err := i.store.UnmountImage(imageID, false); err != nil {
logrus.Debugf("unmounting image %q: %v", imageID, err)
}
}()
globs := make([]string, 0, len(exclusions))
for _, exclusion := range exclusions {
globs = append(globs, exclusion.Path)
}
options := copier.StatOptions{}
stats, err := copier.Stat(mountPoint, mountPoint, options, globs)
if err != nil {
return nil, fmt.Errorf("checking for potential exclusion items in image %q: %w", imageID, err)
}
for _, stat := range stats {
for _, exclusion := range exclusions {
if stat.Glob != exclusion.Path {
continue
}
for result, stat := range stat.Results {
if result != exclusion.Path {
continue
}
if exclusion.ModTime != nil && !exclusion.ModTime.Equal(stat.ModTime) {
continue
}
if exclusion.Mode != nil && *exclusion.Mode != stat.Mode {
continue
}
if exclusion.Owner != nil && (int64(exclusion.Owner.UID) != stat.UID && int64(exclusion.Owner.GID) != stat.GID) {
continue
}
paths = append(paths, exclusion)
}
}
}
return paths, nil
}
func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemContext) (src types.ImageSource, err error) {
// These maps will let us check if a layer ID is part of one group or another.
parentLayerIDs := make(map[string]bool)
@@ -948,6 +999,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
}
var rc io.ReadCloser
var errChan chan error
var layerExclusions []copier.ConditionalRemovePath
if i.confidentialWorkload.Convert {
// Convert the root filesystem into an encrypted disk image.
rc, err = i.extractConfidentialWorkloadFS(i.confidentialWorkload)
@@ -980,6 +1032,18 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
if i.emptyLayer && layerID == i.layerID {
continue
}
if layerID == i.layerID {
// We need to filter out any mount targets that we created.
layerExclusions = append(slices.Clone(i.layerExclusions), i.layerMountTargets...)
// And we _might_ need to filter out directories that were modified
// by creating and removing mount targets, _if_ they were the
// same in the base image for this stage.
layerPullUps, err := i.filterExclusionsByImage(i.layerPullUps, i.fromImageID)
if err != nil {
return nil, fmt.Errorf("checking which exclusions are in base image %q: %w", i.fromImageID, err)
}
layerExclusions = append(layerExclusions, layerPullUps...)
}
// Extract this layer, one of possibly many.
rc, err = i.store.Diff("", layerID, diffOptions)
if err != nil {
@@ -1002,7 +1066,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
// At this point, there are multiple ways that can happen.
diffBeingAltered := i.compression != archive.Uncompressed
diffBeingAltered = diffBeingAltered || i.layerModTime != nil || i.layerLatestModTime != nil
diffBeingAltered = diffBeingAltered || len(i.layerExclusions) != 0
diffBeingAltered = diffBeingAltered || len(layerExclusions) != 0
if diffBeingAltered {
destHasher = digest.Canonical.Digester()
multiWriter = io.MultiWriter(counter, destHasher.Hash())
@@ -1022,7 +1086,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
// Use specified timestamps in the layer, if we're doing that for history
// entries.
nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
writeCloser = makeFilteredLayerWriteCloser(nestedWriteCloser, i.layerModTime, i.layerLatestModTime, i.layerExclusions)
writeCloser = makeFilteredLayerWriteCloser(nestedWriteCloser, i.layerModTime, i.layerLatestModTime, layerExclusions)
writer = writeCloser
// Okay, copy from the raw diff through the filter, compressor, and counter and
// digesters.
@@ -1443,19 +1507,8 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
return nil, fmt.Errorf("getting the per-container data directory for %q: %w", b.ContainerID, err)
}
mountTargetFiles, err := filepath.Glob(filepath.Join(cdir, containerExcludesDir, "*"))
if err != nil {
return nil, fmt.Errorf("checking for commit exclusions for %q: %w", b.ContainerID, err)
}
pulledUpFiles, err := filepath.Glob(filepath.Join(cdir, containerPulledUpDir, "*"))
if err != nil {
return nil, fmt.Errorf("checking for commit pulled-up items for %q: %w", b.ContainerID, err)
}
excludesFiles := slices.Clone(mountTargetFiles)
if !options.ConfidentialWorkloadOptions.Convert && !options.Squash {
excludesFiles = append(excludesFiles, pulledUpFiles...)
}
var layerExclusions []copier.ConditionalRemovePath
gatherExclusions := func(excludesFiles []string) ([]copier.ConditionalRemovePath, error) {
var excludes []copier.ConditionalRemovePath
for _, excludesFile := range excludesFiles {
if strings.Contains(excludesFile, containerExcludesSubstring) {
continue
@@ -1464,16 +1517,43 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
if err != nil {
return nil, fmt.Errorf("reading commit exclusions for %q: %w", b.ContainerID, err)
}
var excludes []copier.ConditionalRemovePath
if err := json.Unmarshal(excludesData, &excludes); err != nil {
var theseExcludes []copier.ConditionalRemovePath
if err := json.Unmarshal(excludesData, &theseExcludes); err != nil {
return nil, fmt.Errorf("parsing commit exclusions for %q: %w", b.ContainerID, err)
}
layerExclusions = append(layerExclusions, excludes...)
excludes = append(excludes, theseExcludes...)
}
return excludes, nil
}
mountTargetFiles, err := filepath.Glob(filepath.Join(cdir, containerExcludesDir, "*"))
if err != nil {
return nil, fmt.Errorf("checking for commit exclusions for %q: %w", b.ContainerID, err)
}
pulledUpFiles, err := filepath.Glob(filepath.Join(cdir, containerPulledUpDir, "*"))
if err != nil {
return nil, fmt.Errorf("checking for commit pulled-up items for %q: %w", b.ContainerID, err)
}
layerMountTargets, err := gatherExclusions(mountTargetFiles)
if err != nil {
return nil, err
}
if len(layerMountTargets) > 0 {
logrus.Debugf("these items were created for use as mount targets: %#v", layerMountTargets)
}
layerPullUps, err := gatherExclusions(pulledUpFiles)
if err != nil {
return nil, err
}
if len(layerPullUps) > 0 {
logrus.Debugf("these items appear to have been pulled up: %#v", layerPullUps)
}
var layerExclusions []copier.ConditionalRemovePath
if options.CompatLayerOmissions == types.OptionalBoolTrue {
layerExclusions = append(layerExclusions, compatLayerExclusions...)
layerExclusions = slices.Clone(compatLayerExclusions)
}
if len(layerExclusions) > 0 {
logrus.Debugf("excluding these items from committed layer: %#v", layerExclusions)
}
manifestType := options.PreferredManifestType
if manifestType == "" {
@@ -1577,6 +1657,8 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
extraImageContent: maps.Clone(options.ExtraImageContent),
compatSetParent: options.CompatSetParent,
layerExclusions: layerExclusions,
layerMountTargets: layerMountTargets,
layerPullUps: layerPullUps,
createdAnnotation: options.CreatedAnnotation,
}
if ref.created != nil {


@@ -862,7 +862,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
logrus.Debugf("stage %d name: %q resolves to %q", stageIndex, stageName, baseWithArg)
stageName = baseWithArg
// If --from=<index> convert index to name
if index, err := strconv.Atoi(stageName); err == nil {
if index, err := strconv.Atoi(stageName); err == nil && index >= 0 && index < stageIndex {
stageName = stages[index].Name
}
// Check if selected base is not an additional
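The added bounds check matters because strconv.Atoi happily parses negative numbers and values past the current stage, and either one would index the stages slice out of range or resolve to a stage that hasn't been built yet. A minimal, self-contained sketch (stage names are hypothetical):

```
package main

import (
	"fmt"
	"strconv"
)

func main() {
	stages := []string{"base", "builder"} // names of the stages built so far
	current := 2                          // index of the stage being processed
	for _, from := range []string{"0", "1", "-1", "7", "builder"} {
		// Only treat "from" as an index if it falls inside the range of
		// stages that precede the current one, as the change above does.
		if index, err := strconv.Atoi(from); err == nil && index >= 0 && index < current {
			from = stages[index]
		}
		fmt.Printf("resolved to %q\n", from)
	}
}
```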


@@ -467,7 +467,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
// exists and if stage short_name matches any
// additionalContext replace stage with additional
// build context.
if index, err := strconv.Atoi(from); err == nil {
if index, err := strconv.Atoi(from); err == nil && index >= 0 && index < s.index {
from = s.stages[index].Name
}
if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
@@ -1395,7 +1395,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// also account if the index is given instead
// of name so convert index in --from=<index>
// to name.
if index, err := strconv.Atoi(from); err == nil {
if index, err := strconv.Atoi(from); err == nil && index >= 0 && index < s.index {
from = s.stages[index].Name
}
// If additional buildContext contains this


@@ -8817,17 +8817,26 @@ RUN --mount=type=bind,ro,src=/Dockerfile,target=/etc/removed-file --mount=type=b
_EOF
local source_date_epoch=$(date +%s)
# build a copy of the image that's all squashed
run_buildah build --no-cache --squash --source-date-epoch=$source_date_epoch --rewrite-timestamp -t dir:${TEST_SCRATCH_DIR}/squashed-image ${contextdir}
# build a copy of the image that's "normal"
run_buildah build --no-cache --layers --source-date-epoch=$source_date_epoch --rewrite-timestamp -t not-squashed-image ${contextdir}
# now squash it, with no record of needing to exclude anything carrying over
run_buildah from --name not-squashed-container not-squashed-image
run_buildah commit --squash not-squashed-container dir:${TEST_SCRATCH_DIR}/squashed-later-image
# find the diffs for the two versions, which should look the same
# build a copy of the image that's all squashed from the start
run_buildah build --no-cache --squash=true --source-date-epoch=$source_date_epoch --rewrite-timestamp -t dir:${TEST_SCRATCH_DIR}/squashed-image ${contextdir}
# build a copy of the image where we only commit at the end
run_buildah build --no-cache --layers=false --source-date-epoch=$source_date_epoch --rewrite-timestamp -t not-layered-image ${contextdir}
# build a copy of the image where we commit at every step
run_buildah build --no-cache --layers=true --source-date-epoch=$source_date_epoch --rewrite-timestamp -t layered-image ${contextdir}
# now squash them, with no internal record of needing to exclude anything carrying over
run_buildah from --name not-layered-container not-layered-image
run_buildah commit --squash not-layered-container dir:${TEST_SCRATCH_DIR}/squashed-not-layered-image
run_buildah from --name layered-container layered-image
run_buildah commit --squash layered-container dir:${TEST_SCRATCH_DIR}/squashed-layered-image
# find the diffs for the three versions, which should look the same thanks to the --source-date-epoch --rewrite-timestamp combo
local squashed=${TEST_SCRATCH_DIR}/squashed-image/$(dir_image_last_diff ${TEST_SCRATCH_DIR}/squashed-image)
local squashedlater=${TEST_SCRATCH_DIR}/squashed-later-image/$(dir_image_last_diff ${TEST_SCRATCH_DIR}/squashed-later-image)
cmp ${squashed} ${squashedlater}
local notlayered=${TEST_SCRATCH_DIR}/squashed-not-layered-image/$(dir_image_last_diff ${TEST_SCRATCH_DIR}/squashed-not-layered-image)
local layered=${TEST_SCRATCH_DIR}/squashed-layered-image/$(dir_image_last_diff ${TEST_SCRATCH_DIR}/squashed-layered-image)
tar tvf ${squashed} > ${TEST_SCRATCH_DIR}/squashed-image-rootfs.txt
tar tvf ${notlayered} > ${TEST_SCRATCH_DIR}/squashed-not-layered-image-rootfs.txt
tar tvf ${layered} > ${TEST_SCRATCH_DIR}/squashed-layered-image-rootfs.txt
diff -u ${TEST_SCRATCH_DIR}/squashed-layered-image-rootfs.txt ${TEST_SCRATCH_DIR}/squashed-image-rootfs.txt
diff -u ${TEST_SCRATCH_DIR}/squashed-layered-image-rootfs.txt ${TEST_SCRATCH_DIR}/squashed-not-layered-image-rootfs.txt
}
@test "bud COPY one file to ..../. creates the destination directory" {


@@ -27,6 +27,11 @@ docker pull registry.fedoraproject.org/fedora-minimal:42-x86_64
docker pull registry.fedoraproject.org/fedora-minimal
```
This test program is used as input in a few of the conformance tests:
```
make tests/conformance/testdata/mount-targets/true
```
You can run all of the tests with go test (and under `buildah unshare` or `podman unshare` if you're not root):
```
go test -v -timeout=30m -tags "$(./btrfs_installed_tag.sh)" ./tests/conformance