imagebuildah.StageExecutor: clean up volumes/volumeCache
Clean up the distinctions between the volumes slice and the volumeCache
and volumeCacheInfo maps so that --compat-volumes will work correctly
when we're building with multiple layers.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
commit a47261ec11
parent dc2a5cb82e
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"fmt"
 	"os"
 
 	"github.com/containers/buildah/imagebuildah"
@@ -73,7 +74,7 @@ func buildCmd(c *cobra.Command, inputArgs []string, iopts buildahcli.BuildOption
 	if c.Flag("logfile").Changed {
 		logfile, err := os.OpenFile(iopts.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
 		if err != nil {
-			return err
+			return fmt.Errorf("opening log file: %w", err)
 		}
 		iopts.Logwriter = logfile
 		defer iopts.Logwriter.Close()
@@ -143,7 +143,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
 	}
 	contents, err = os.Open(dfile)
 	if err != nil {
-		return "", nil, err
+		return "", nil, fmt.Errorf("reading build instructions: %w", err)
 	}
 	dinfo, err = contents.Stat()
 	if err != nil {
@@ -222,7 +222,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
 	} else {
 		rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("creating file to store rusage logs: %w", err)
 		}
 	}
 }
@@ -67,9 +67,9 @@ type StageExecutor struct {
 	name            string
 	builder         *buildah.Builder
 	preserved       int
-	volumes         imagebuilder.VolumeSet
-	volumeCache     map[string]string
-	volumeCacheInfo map[string]os.FileInfo
+	volumes         imagebuilder.VolumeSet // list of directories which are volumes
+	volumeCache     map[string]string      // mapping from volume directories to cache archives (used by vfs method)
+	volumeCacheInfo map[string]os.FileInfo // mapping from volume directories to perms/datestamps to reset after restoring
 	mountPoint      string
 	output          string
 	containerIDs    []string
@@ -92,7 +92,7 @@ type StageExecutor struct {
 // writeable while the RUN instruction is being handled, even if any changes
 // made within the directory are ultimately discarded.
 func (s *StageExecutor) Preserve(path string) error {
-	logrus.Debugf("PRESERVE %q in %q", path, s.builder.ContainerID)
+	logrus.Debugf("PRESERVE %q in %q (already preserving %v)", path, s.builder.ContainerID, s.volumes)
 
 	// Try and resolve the symlink (if one exists)
 	// Set archivedPath and path based on whether a symlink is found or not
@@ -111,71 +111,61 @@ func (s *StageExecutor) Preserve(path string) error {
 		return fmt.Errorf("evaluating path %q: %w", path, err)
 	}
 
 	// Whether or not we're caching and restoring the contents of this
 	// directory, we need to ensure it exists now.
 	const createdDirPerms = os.FileMode(0o755)
-	if s.executor.compatVolumes != types.OptionalBoolTrue {
-		logrus.Debugf("ensuring volume path %q exists", path)
+	st, err := os.Stat(archivedPath)
+	if errors.Is(err, os.ErrNotExist) {
+		// Yup, we do have to create it. That means it's not in any
+		// cached copy of the path that covers it, so we have to
+		// invalidate such cached copy.
+		logrus.Debugf("have to create volume %q", path)
 		createdDirPerms := createdDirPerms
 		if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
 			return fmt.Errorf("ensuring volume path exists: %w", err)
 		}
-		logrus.Debugf("not doing volume save-and-restore of %q in %q", path, s.builder.ContainerID)
-		return nil
+		if err := s.volumeCacheInvalidate(path); err != nil {
+			return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
+		}
+		if st, err = os.Stat(archivedPath); err != nil {
+			return fmt.Errorf("checking on just-created volume path: %w", err)
+		}
+	}
+	if err != nil {
+		return fmt.Errorf("reading info cache for volume at %q: %w", path, err)
 	}
+
 	if s.volumes.Covers(path) {
 		// This path is a subdirectory of a volume path that we're
-		// already preserving, so there's nothing new to be done except
-		// ensure that it exists.
-		st, err := os.Stat(archivedPath)
-		if errors.Is(err, os.ErrNotExist) {
-			// We do have to create it. That means it's not in any
-			// cached copy of the path that covers it, so we have
-			// to invalidate such cached copy.
-			logrus.Debugf("have to create volume %q", path)
-			createdDirPerms := createdDirPerms
-			if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
-				return fmt.Errorf("ensuring volume path exists: %w", err)
-			}
-			if err := s.volumeCacheInvalidate(path); err != nil {
-				return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
-			}
-			if st, err = os.Stat(archivedPath); err != nil {
-				return fmt.Errorf("checking on just-created volume path: %w", err)
-			}
-		}
+		// already preserving, so there's nothing new to be done now
+		// that we've ensured that it exists.
		s.volumeCacheInfo[path] = st
 		return nil
 	}
 
-	// Figure out where the cache for this volume would be stored.
-	s.preserved++
-	cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
-	if err != nil {
-		return fmt.Errorf("unable to locate temporary directory for container")
-	}
-	cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
-
-	// Save info about the top level of the location that we'll be archiving.
-	st, err := os.Stat(archivedPath)
-	if errors.Is(err, os.ErrNotExist) {
-		logrus.Debugf("have to create volume %q", path)
-		createdDirPerms := os.FileMode(0o755)
-		if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
-			return fmt.Errorf("ensuring volume path exists: %w", err)
-		}
-		st, err = os.Stat(archivedPath)
-	}
-	if err != nil {
-		logrus.Debugf("error reading info about %q: %v", archivedPath, err)
-		return err
-	}
-	s.volumeCacheInfo[path] = st
+	// Add the new volume path to the ones that we're tracking.
 	if !s.volumes.Add(path) {
 		// This path is not a subdirectory of a volume path that we're
 		// already preserving, so adding it to the list should have
 		// worked.
 		return fmt.Errorf("adding %q to the volume cache", path)
 	}
+	s.volumeCacheInfo[path] = st
+
+	// If we're not doing save/restore, we're done, since volumeCache
+	// should be empty.
+	if s.executor.compatVolumes != types.OptionalBoolTrue {
+		logrus.Debugf("not doing volume save-and-restore of %q in %q", path, s.builder.ContainerID)
+		return nil
+	}
+
+	// Decide where the cache for this volume will be stored.
+	s.preserved++
+	cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
+	if err != nil {
+		return fmt.Errorf("unable to locate temporary directory for container")
+	}
+	cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
 	s.volumeCache[path] = cacheFile
+
 	// Now prune cache files for volumes that are newly supplanted by this one.
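Preserve's control flow leans on the VolumeSet semantics from github.com/openshift/imagebuilder: Covers reports whether a path already falls inside a tracked volume, and Add refuses paths that are already covered while supplanting any tracked entries beneath the new one (which is why the code goes on to prune cache files after a successful Add). A standalone sketch of those semantics, as I read the upstream package (not code from this patch):

package main

import (
	"fmt"

	"github.com/openshift/imagebuilder"
)

func main() {
	var volumes imagebuilder.VolumeSet

	// A path that isn't covered yet can be added and becomes tracked.
	fmt.Println(volumes.Add("/vol/subvol")) // true

	// Subdirectories of a tracked volume are covered, and re-adding them
	// is refused -- so the "adding %q to the volume cache" error above is
	// only reachable if the earlier Covers check was wrong.
	fmt.Println(volumes.Covers("/vol/subvol/deeper")) // true
	fmt.Println(volumes.Add("/vol/subvol/deeper"))    // false

	// Adding a parent supplants the entries beneath it; Preserve then
	// prunes the cache files that belonged to the supplanted paths.
	fmt.Println(volumes.Add("/vol")) // true
}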
@@ -206,7 +196,7 @@ func (s *StageExecutor) Preserve(path string) error {
 			if errors.Is(err, os.ErrNotExist) {
 				continue
 			}
-			return err
+			return fmt.Errorf("removing cache of %q: %w", archivedPath, err)
 		}
 		delete(s.volumeCache, cachedPath)
 	}
@@ -256,16 +246,12 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
 			continue
 		}
 		if !errors.Is(err, os.ErrNotExist) {
-			return nil, err
-		}
-		createdDirPerms := os.FileMode(0755)
-		if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
-			return nil, fmt.Errorf("ensuring volume path exists: %w", err)
+			return nil, fmt.Errorf("checking for presence of a cached copy of %q at %q: %w", cachedPath, cacheFile, err)
 		}
 		logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
 		cache, err := os.Create(cacheFile)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("creating cache for volume %q: %w", archivedPath, err)
 		}
 		defer cache.Close()
 		rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
@@ -298,16 +284,12 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
 		logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
 		cache, err := os.Open(cacheFile)
 		if err != nil {
-			return err
+			return fmt.Errorf("restoring contents of volume %q: %w", archivedPath, err)
 		}
 		defer cache.Close()
 		if err := copier.Remove(s.mountPoint, archivedPath, copier.RemoveOptions{All: true}); err != nil {
 			return err
 		}
-		createdDirPerms := os.FileMode(0o755)
-		if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
-			return err
-		}
 		err = chrootarchive.Untar(cache, archivedPath, nil)
 		if err != nil {
 			return fmt.Errorf("extracting archive at %q: %w", archivedPath, err)
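The vfs save/restore pair above boils down to: tar up each registered volume before a RUN step, then wipe the directory and re-extract the tarball afterward, so anything written under the volume during the step disappears. A self-contained toy version of that discipline using only the standard library (illustrative only; buildah itself uses chrootarchive.Tar/Untar rooted at the stage's mountpoint, and after this patch it relies on Untar to recreate the directory rather than pre-creating it):

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
)

// save archives the contents of dir into tarball.
func save(dir, tarball string) error {
	out, err := os.Create(tarball)
	if err != nil {
		return err
	}
	defer out.Close()
	tw := tar.NewWriter(out)
	defer tw.Close()
	return filepath.Walk(dir, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil || path == dir {
			return walkErr
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		hdr.Name = rel
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if !info.Mode().IsRegular() {
			return nil
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}

// restore wipes dir and re-extracts tarball into it, discarding anything
// written under dir since save ran.
func restore(tarball, dir string) error {
	if err := os.RemoveAll(dir); err != nil {
		return err
	}
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return err
	}
	in, err := os.Open(tarball)
	if err != nil {
		return err
	}
	defer in.Close()
	tr := tar.NewReader(in)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		target := filepath.Join(dir, filepath.Clean(hdr.Name))
		switch hdr.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(target, os.FileMode(hdr.Mode)); err != nil {
				return err
			}
		case tar.TypeReg:
			data, err := io.ReadAll(tr)
			if err != nil {
				return err
			}
			if err := os.WriteFile(target, data, os.FileMode(hdr.Mode)); err != nil {
				return err
			}
		}
	}
}

func main() {
	dir, err := os.MkdirTemp("", "vol")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)
	tarball := dir + ".tar"
	defer os.Remove(tarball)

	os.WriteFile(filepath.Join(dir, "volfile"), []byte("frozen\n"), 0o644)
	if err := save(dir, tarball); err != nil {
		log.Fatal(err)
	}
	// A "RUN step" scribbles in the volume...
	os.WriteFile(filepath.Join(dir, "scribble"), []byte("discard me\n"), 0o644)
	// ...and restore throws the scribble away.
	if err := restore(tarball, dir); err != nil {
		log.Fatal(err)
	}
	_, err = os.Stat(filepath.Join(dir, "scribble"))
	fmt.Println("scribble survives:", err == nil) // prints: scribble survives: false
}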
@@ -334,13 +316,11 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
 }
 
 // Save the contents of each of the executor's list of volumes for which we
-// don't already have a cache file.
+// don't already have a cache file. For overlay, we "save" and "restore" by
+// using it as a lower for an overlay mount in the same location, and then
+// discarding the upper.
 func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) {
 	for cachedPath := range s.volumeCache {
-		err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{})
-		if err != nil {
-			return nil, fmt.Errorf("ensuring volume exists: %w", err)
-		}
 		volumePath := filepath.Join(s.mountPoint, cachedPath)
 		mount := specs.Mount{
 			Source: volumePath,
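The overlay method implements the new comment literally: the volume's current contents become the read-only lower layer of an overlay mount at the same location, writes land in an upper directory, and the upper is simply discarded instead of being merged back. Roughly the shape of the returned mount in OCI runtime-spec terms (the lowerdir/upperdir/workdir paths here are hypothetical; buildah allocates its own scratch directories):

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// The volume path inside the stage's rootfs serves as the lowerdir, so
	// the container sees its current contents; writes go to the upperdir,
	// which is thrown away after the RUN step instead of being merged back.
	mount := specs.Mount{
		Destination: "/vol/subvol", // the VOLUME path inside the container
		Type:        "overlay",
		Source:      "overlay",
		Options: []string{
			"lowerdir=/path/to/stage/merged/vol/subvol", // existing content, read-only
			"upperdir=/path/to/scratch/upper",           // hypothetical scratch dir
			"workdir=/path/to/scratch/work",             // hypothetical scratch dir
		},
	}
	fmt.Printf("%+v\n", mount)
}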
@@ -1079,6 +1059,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 	s.mountPoint = mountPoint
 	s.builder = builder
 	// Now that the rootfs is mounted, set up handling of volumes from the base image.
 	s.volumes = make([]string, 0, len(s.volumes))
+	s.volumeCache = make(map[string]string)
 	s.volumeCacheInfo = make(map[string]os.FileInfo)
 	for _, v := range builder.Volumes() {
@@ -2468,18 +2468,33 @@ _EOF
   skip_if_no_runtime

   _prefetch alpine
-  target=volume-image
-  run_buildah build $WITH_POLICY_JSON -t ${target} --compat-volumes $BUDFILES/preserve-volumes
-  run_buildah from --quiet ${target}
-  cid=$output
-  run_buildah mount ${cid}
-  root=$output
-  test -s $root/vol/subvol/subsubvol/subsubvolfile
-  test ! -s $root/vol/subvol/subvolfile
-  test -s $root/vol/volfile
-  test -s $root/vol/Dockerfile
-  test -s $root/vol/Dockerfile2
-  test ! -s $root/vol/anothervolfile
+  for layers in "" --layers ; do
+    for compat in "" --compat-volumes ; do
+      target=volume-image$compat$layers
+      run_buildah build $WITH_POLICY_JSON -t ${target} ${layers} ${compat} $BUDFILES/preserve-volumes
+      run_buildah from --quiet ${target}
+      cid=$output
+      run_buildah mount ${cid}
+      root=$output
+      # these files were created before VOLUME instructions froze the directories that contained them
+      test -s $root/vol/subvol/subsubvol/subsubvolfile
+      test -s $root/vol/volfile
+      if test "$compat" != "" ; then
+        # true, these files should have been discarded after they were created by RUN instructions
+        test ! -s $root/vol/subvol/subvolfile
+        test ! -s $root/vol/anothervolfile
+      else
+        # false, these files should not have been discarded, despite being created by RUN instructions
+        test -s $root/vol/subvol/subvolfile
+        test -s $root/vol/anothervolfile
+      fi
+      # and these were ADDed
+      test -s $root/vol/Dockerfile
+      test -s $root/vol/Dockerfile2
+      run_buildah rm ${cid}
+      run_buildah rmi ${target}
+    done
+  done
 }

 # Helper function for several of the tests which pull from http.
@@ -2701,16 +2716,33 @@ function validate_instance_compression {
   skip_if_no_runtime

   _prefetch alpine
-  target=volume-image
-  run_buildah build $WITH_POLICY_JSON -t ${target} --compat-volumes=true $BUDFILES/volume-perms
-  run_buildah from --quiet $WITH_POLICY_JSON ${target}
-  cid=$output
-  run_buildah mount ${cid}
-  root=$output
-  test ! -s $root/vol/subvol/subvolfile
-  run stat -c %f $root/vol/subvol
-  assert "$status" -eq 0 "status code from stat $root/vol/subvol"
-  expect_output "41ed" "stat($root/vol/subvol) [0x41ed = 040755]"
+  for layers in "" --layers ; do
+    for compat in "" --compat-volumes ; do
+      target=volume-image$compat$layers
+      run_buildah build $WITH_POLICY_JSON -t ${target} ${layers} ${compat} $BUDFILES/volume-perms
+      run_buildah from --quiet $WITH_POLICY_JSON ${target}
+      cid=$output
+      run_buildah mount ${cid}
+      root=$output
+      if test "$compat" != "" ; then
+        # true, /vol/subvol should not have contents, and its permissions should be the default 0755
+        test -d $root/vol/subvol
+        test ! -s $root/vol/subvol/subvolfile
+        run stat -c %a $root/vol/subvol
+        assert "$status" -eq 0 "status code from stat $root/vol/subvol"
+        expect_output "755" "stat($root/vol/subvol)"
+      else
+        # false, /vol/subvol should have contents, and its permissions should be the changed 0711
+        test -d $root/vol/subvol
+        test -s $root/vol/subvol/subvolfile
+        run stat -c %a $root/vol/subvol
+        assert "$status" -eq 0 "status code from stat $root/vol/subvol"
+        expect_output "711" "stat($root/vol/subvol)"
+      fi
+      run_buildah rm ${cid}
+      run_buildah rmi ${target}
+    done
+  done
 }

 @test "bud-volume-ownership" {
@@ -2,23 +2,27 @@ FROM alpine
 RUN mkdir -p /vol/subvol/subsubvol
 RUN dd if=/dev/zero bs=512 count=1 of=/vol/subvol/subsubvol/subsubvolfile
 VOLUME /vol/subvol
-# At this point, the contents below /vol/subvol should be frozen.
+# At this point, the contents below /vol/subvol may be frozen, so try to create
+# something that will be discarded if it was.
 RUN dd if=/dev/zero bs=512 count=1 of=/vol/subvol/subvolfile
-# In particular, /vol/subvol/subvolfile should be wiped out.
+# In particular, /vol/subvol/subvolfile should be wiped out if --compat-volumes
+# behavior was selected.
 RUN dd if=/dev/zero bs=512 count=1 of=/vol/volfile
-# However, /vol/volfile should exist.
+# However, /vol/volfile should always exist, since /vol was not a volume, but
+# we're making it one here.
 VOLUME /vol
 # And this should be redundant.
 VOLUME /vol/subvol
-# And now we've frozen /vol.
+# And now that we've frozen /vol, --compat-volumes should make this disappear,
+# too.
 RUN dd if=/dev/zero bs=512 count=1 of=/vol/anothervolfile
-# Which means that in the image we're about to commit, /vol/anothervolfile
-# shouldn't exist, either.

-# ADD files which should persist.
+# ADD files which should persist, regardless of the --compat-volumes setting.
 ADD Dockerfile /vol/Dockerfile
 RUN stat /vol/Dockerfile
 ADD Dockerfile /vol/Dockerfile2
 RUN stat /vol/Dockerfile2
-# We should still be saving and restoring volume caches.
+
+# This directory should still exist, since we cached /vol once it was declared
+# a VOLUME, and /vol/subvol was created before that (as a VOLUME, but still).
 RUN dd if=/dev/zero bs=512 count=1 of=/vol/subvol/subvolfile
@@ -1,6 +1,8 @@
 FROM alpine
 VOLUME /vol/subvol
-# At this point, the directory should exist, with default permissions 0755, the
-# contents below /vol/subvol should be frozen, and we shouldn't get an error
-# from trying to write to it because it was created automatically.
+# At this point, the directory should exist, and it should have default
+# permissions 0755, and we shouldn't get an error from trying to write to it
+# because it was created automatically. If this image is built with the
+# --compat-volumes flag, everything done after this point will be discarded.
+RUN chmod 0711 /vol/subvol
 RUN dd if=/dev/zero bs=512 count=1 of=/vol/subvol/subvolfile