imagebuildah: make traditional volume handling not the default
Make the traditional handling of volumes (where they're "frozen" and can only be modified by ADD or COPY, which requires that we cache their contents and save/restore them before/after RUN instructions) an option that is not enabled by default. Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
This commit is contained in:
parent
b5eafdf55b
commit
a7098c776a
|
@ -370,4 +370,8 @@ type BuildOptions struct {
|
|||
// configuration when committing in Docker format. Newer
|
||||
// BuildKit-based docker build doesn't set this field.
|
||||
CompatSetParent types.OptionalBool
|
||||
// CompatVolumes causes the contents of locations marked as volumes in
|
||||
// base images or by a VOLUME instruction to be preserved during RUN
|
||||
// instructions. Newer BuildKit-based docker build doesn't bother.
|
||||
CompatVolumes types.OptionalBool
|
||||
}
|
||||
|
|
|
@ -197,6 +197,14 @@ The configured value can be "" (the empty string) or "private" to indicate
|
|||
that a new cgroup namespace should be created, or it can be "host" to indicate
|
||||
that the cgroup namespace in which `buildah` itself is being run should be reused.
|
||||
|
||||
**--compat-volumes**
|
||||
|
||||
Handle directories marked using the VOLUME instruction (both in this build, and
|
||||
those inherited from base images) such that their contents can only be modified
|
||||
by ADD and COPY instructions. Any changes made in those locations by RUN
|
||||
instructions will be reverted. Before the introduction of this option, this
|
||||
behavior was the default, but it is now disabled by default.
|
||||
|
||||
**--compress**
|
||||
|
||||
This option is added to be aligned with other container CLIs.
|
||||
|
|
|
@ -161,6 +161,7 @@ type Executor struct {
|
|||
sbomScanOptions []define.SBOMScanOptions
|
||||
cdiConfigDir string
|
||||
compatSetParent types.OptionalBool
|
||||
compatVolumes types.OptionalBool
|
||||
}
|
||||
|
||||
type imageTypeAndHistoryAndDiffIDs struct {
|
||||
|
@ -318,6 +319,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
|
|||
sbomScanOptions: options.SBOMScanOptions,
|
||||
cdiConfigDir: options.CDIConfigDir,
|
||||
compatSetParent: options.CompatSetParent,
|
||||
compatVolumes: options.CompatVolumes,
|
||||
}
|
||||
if exec.err == nil {
|
||||
exec.err = os.Stderr
|
||||
|
|
|
@ -44,6 +44,7 @@ import (
|
|||
"github.com/openshift/imagebuilder/dockerfile/command"
|
||||
"github.com/openshift/imagebuilder/dockerfile/parser"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// StageExecutor bundles up what we need to know when executing one stage of a
|
||||
|
@ -80,42 +81,22 @@ type StageExecutor struct {
|
|||
// Preserve informs the stage executor that from this point on, it needs to
|
||||
// ensure that only COPY and ADD instructions can modify the contents of this
|
||||
// directory or anything below it.
|
||||
// The StageExecutor handles this by caching the contents of directories which
|
||||
// have been marked this way before executing a RUN instruction, invalidating
|
||||
// that cache when an ADD or COPY instruction sets any location under the
|
||||
// directory as the destination, and using the cache to reset the contents of
|
||||
// the directory tree after processing each RUN instruction.
|
||||
// When CompatVolumes is enabled, the StageExecutor handles this by caching the
|
||||
// contents of directories which have been marked this way before executing a
|
||||
// RUN instruction, invalidating that cache when an ADD or COPY instruction
|
||||
// sets any location under the directory as the destination, and using the
|
||||
// cache to reset the contents of the directory tree after processing each RUN
|
||||
// instruction.
|
||||
// It would be simpler if we could just mark the directory as a read-only bind
|
||||
// mount of itself during Run(), but the directory is expected to remain
|
||||
// writeable while the RUN instruction is being handled, even if any changes
|
||||
// made within the directory are ultimately discarded.
|
||||
func (s *StageExecutor) Preserve(path string) error {
|
||||
logrus.Debugf("PRESERVE %q", path)
|
||||
if s.volumes.Covers(path) {
|
||||
// This path is already a subdirectory of a volume path that
|
||||
// we're already preserving, so there's nothing new to be done
|
||||
// except ensure that it exists.
|
||||
createdDirPerms := os.FileMode(0755)
|
||||
if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
|
||||
return fmt.Errorf("ensuring volume path exists: %w", err)
|
||||
}
|
||||
if err := s.volumeCacheInvalidate(path); err != nil {
|
||||
return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Figure out where the cache for this volume would be stored.
|
||||
s.preserved++
|
||||
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to locate temporary directory for container")
|
||||
}
|
||||
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
|
||||
// Save info about the top level of the location that we'll be archiving.
|
||||
var archivedPath string
|
||||
logrus.Debugf("PRESERVE %q in %q", path, s.builder.ContainerID)
|
||||
|
||||
// Try and resolve the symlink (if one exists)
|
||||
// Set archivedPath and path based on whether a symlink is found or not
|
||||
var archivedPath string
|
||||
if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil {
|
||||
symLink, err := filepath.Rel(s.mountPoint, evaluated)
|
||||
if err != nil {
|
||||
|
@ -130,9 +111,55 @@ func (s *StageExecutor) Preserve(path string) error {
|
|||
return fmt.Errorf("evaluating path %q: %w", path, err)
|
||||
}
|
||||
|
||||
const createdDirPerms = os.FileMode(0o755)
|
||||
if s.executor.compatVolumes != types.OptionalBoolTrue {
|
||||
logrus.Debugf("ensuring volume path %q exists", path)
|
||||
createdDirPerms := createdDirPerms
|
||||
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
|
||||
return fmt.Errorf("ensuring volume path exists: %w", err)
|
||||
}
|
||||
logrus.Debugf("not doing volume save-and-restore of %q in %q", path, s.builder.ContainerID)
|
||||
return nil
|
||||
}
|
||||
|
||||
if s.volumes.Covers(path) {
|
||||
// This path is a subdirectory of a volume path that we're
|
||||
// already preserving, so there's nothing new to be done except
|
||||
// ensure that it exists.
|
||||
st, err := os.Stat(archivedPath)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// We do have to create it. That means it's not in any
|
||||
// cached copy of the path that covers it, so we have
|
||||
// to invalidate such cached copy.
|
||||
logrus.Debugf("have to create volume %q", path)
|
||||
createdDirPerms := createdDirPerms
|
||||
if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
|
||||
return fmt.Errorf("ensuring volume path exists: %w", err)
|
||||
}
|
||||
if err := s.volumeCacheInvalidate(path); err != nil {
|
||||
return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
|
||||
}
|
||||
if st, err = os.Stat(archivedPath); err != nil {
|
||||
return fmt.Errorf("checking on just-created volume path: %w", err)
|
||||
}
|
||||
}
|
||||
s.volumeCacheInfo[path] = st
|
||||
return nil
|
||||
}
|
||||
|
||||
// Figure out where the cache for this volume would be stored.
|
||||
s.preserved++
|
||||
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to locate temporary directory for container")
|
||||
}
|
||||
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
|
||||
|
||||
// Save info about the top level of the location that we'll be archiving.
|
||||
st, err := os.Stat(archivedPath)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
createdDirPerms := os.FileMode(0755)
|
||||
logrus.Debugf("have to create volume %q", path)
|
||||
createdDirPerms := os.FileMode(0o755)
|
||||
if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
|
||||
return fmt.Errorf("ensuring volume path exists: %w", err)
|
||||
}
|
||||
|
@ -145,11 +172,13 @@ func (s *StageExecutor) Preserve(path string) error {
|
|||
s.volumeCacheInfo[path] = st
|
||||
if !s.volumes.Add(path) {
|
||||
// This path is not a subdirectory of a volume path that we're
|
||||
// already preserving, so adding it to the list should work.
|
||||
// already preserving, so adding it to the list should have
|
||||
// worked.
|
||||
return fmt.Errorf("adding %q to the volume cache", path)
|
||||
}
|
||||
s.volumeCache[path] = cacheFile
|
||||
// Now prune cache files for volumes that are now supplanted by this one.
|
||||
|
||||
// Now prune cache files for volumes that are newly supplanted by this one.
|
||||
removed := []string{}
|
||||
for cachedPath := range s.volumeCache {
|
||||
// Walk our list of cached volumes, and check that they're
|
||||
|
@ -168,6 +197,7 @@ func (s *StageExecutor) Preserve(path string) error {
|
|||
removed = append(removed, cachedPath)
|
||||
}
|
||||
}
|
||||
|
||||
// Actually remove the caches that we decided to remove.
|
||||
for _, cachedPath := range removed {
|
||||
archivedPath := filepath.Join(s.mountPoint, cachedPath)
|
||||
|
@ -772,32 +802,33 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
|
|||
}
|
||||
namespaceOptions := append([]define.NamespaceOption{}, s.executor.namespaceOptions...)
|
||||
options := buildah.RunOptions{
|
||||
Args: s.executor.runtimeArgs,
|
||||
Cmd: config.Cmd,
|
||||
ContextDir: s.executor.contextDir,
|
||||
ConfigureNetwork: s.executor.configureNetwork,
|
||||
Entrypoint: config.Entrypoint,
|
||||
Env: config.Env,
|
||||
Hostname: config.Hostname,
|
||||
Logger: s.executor.logger,
|
||||
Mounts: s.executor.transientMounts,
|
||||
NamespaceOptions: namespaceOptions,
|
||||
NoHostname: s.executor.noHostname,
|
||||
NoHosts: s.executor.noHosts,
|
||||
NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "",
|
||||
Quiet: s.executor.quiet,
|
||||
RunMounts: run.Mounts,
|
||||
Runtime: s.executor.runtime,
|
||||
Secrets: s.executor.secrets,
|
||||
SSHSources: s.executor.sshsources,
|
||||
StageMountPoints: stageMountPoints,
|
||||
Stderr: s.executor.err,
|
||||
Stdin: stdin,
|
||||
Stdout: s.executor.out,
|
||||
SystemContext: s.executor.systemContext,
|
||||
Terminal: buildah.WithoutTerminal,
|
||||
User: config.User,
|
||||
WorkingDir: config.WorkingDir,
|
||||
Args: s.executor.runtimeArgs,
|
||||
Cmd: config.Cmd,
|
||||
ContextDir: s.executor.contextDir,
|
||||
ConfigureNetwork: s.executor.configureNetwork,
|
||||
Entrypoint: config.Entrypoint,
|
||||
Env: config.Env,
|
||||
Hostname: config.Hostname,
|
||||
Logger: s.executor.logger,
|
||||
Mounts: slices.Clone(s.executor.transientMounts),
|
||||
NamespaceOptions: namespaceOptions,
|
||||
NoHostname: s.executor.noHostname,
|
||||
NoHosts: s.executor.noHosts,
|
||||
NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "",
|
||||
Quiet: s.executor.quiet,
|
||||
CompatBuiltinVolumes: types.OptionalBoolFalse,
|
||||
RunMounts: run.Mounts,
|
||||
Runtime: s.executor.runtime,
|
||||
Secrets: s.executor.secrets,
|
||||
SSHSources: s.executor.sshsources,
|
||||
StageMountPoints: stageMountPoints,
|
||||
Stderr: s.executor.err,
|
||||
Stdin: stdin,
|
||||
Stdout: s.executor.out,
|
||||
SystemContext: s.executor.systemContext,
|
||||
Terminal: buildah.WithoutTerminal,
|
||||
User: config.User,
|
||||
WorkingDir: config.WorkingDir,
|
||||
}
|
||||
|
||||
// Honor `RUN --network=<>`.
|
||||
|
@ -824,20 +855,40 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
|
|||
args = append([]string{"/bin/sh", "-c"}, args...)
|
||||
}
|
||||
}
|
||||
mounts, err := s.volumeCacheSave()
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if s.executor.compatVolumes == types.OptionalBoolTrue {
|
||||
// Only bother with saving/restoring the contents of volumes if
|
||||
// we've been specifically asked to.
|
||||
mounts, err := s.volumeCacheSave()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
options.Mounts = append(options.Mounts, mounts...)
|
||||
}
|
||||
options.Mounts = append(options.Mounts, mounts...)
|
||||
|
||||
// The list of built-in volumes isn't passed in via RunOptions, so make
|
||||
// sure the builder's list of built-in volumes includes anything that
|
||||
// the configuration thinks is a built-in volume.
|
||||
s.builder.ClearVolumes()
|
||||
for v := range config.Volumes {
|
||||
s.builder.AddVolume(v)
|
||||
}
|
||||
|
||||
if len(heredocMounts) > 0 {
|
||||
options.Mounts = append(options.Mounts, heredocMounts...)
|
||||
}
|
||||
err = s.builder.Run(args, options)
|
||||
if err2 := s.volumeCacheRestore(); err2 != nil {
|
||||
if err == nil {
|
||||
return err2
|
||||
|
||||
if s.executor.compatVolumes == types.OptionalBoolTrue {
|
||||
// Only bother with saving/restoring the contents of volumes if
|
||||
// we've been specifically asked to.
|
||||
if err2 := s.volumeCacheRestore(); err2 != nil {
|
||||
if err == nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -361,6 +361,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
CDIConfigDir: iopts.CDIConfigDir,
|
||||
CNIConfigDir: iopts.CNIConfigDir,
|
||||
CNIPluginPath: iopts.CNIPlugInPath,
|
||||
CompatVolumes: types.NewOptionalBool(iopts.CompatVolumes),
|
||||
ConfidentialWorkload: confidentialWorkloadOptions,
|
||||
CPPFlags: iopts.CPPFlags,
|
||||
CommonBuildOpts: commonOpts,
|
||||
|
|
|
@ -119,6 +119,7 @@ type BudResults struct {
|
|||
OSVersion string
|
||||
CWOptions string
|
||||
SBOMOptions []string
|
||||
CompatVolumes bool
|
||||
}
|
||||
|
||||
// FromAndBugResults represents the results for common flags
|
||||
|
@ -228,6 +229,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
|
|||
fs.StringVar(&flags.CacheTTL, "cache-ttl", "", "only consider cache images under specified duration.")
|
||||
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
|
||||
fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image")
|
||||
fs.BoolVar(&flags.CompatVolumes, "compat-volumes", false, "preserve the contents of VOLUMEs during RUN instructions")
|
||||
fs.StringArrayVar(&flags.CPPFlags, "cpp-flag", []string{}, "set additional flag to pass to C preprocessor (cpp)")
|
||||
fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
|
||||
fs.StringVarP(&flags.CWOptions, "cw", "", "", "confidential workload `options`")
|
||||
|
|
6
run.go
6
run.go
|
@ -170,6 +170,12 @@ type RunOptions struct {
|
|||
// CDIConfigDir is the location of CDI configuration files, if the files in
|
||||
// the default configuration locations shouldn't be used.
|
||||
CDIConfigDir string
|
||||
// CompatBuiltinVolumes causes the contents of locations marked as
|
||||
// volumes in the container's configuration to be set up as bind mounts to
|
||||
// directories which are not in the container's rootfs, hiding changes
|
||||
// made to the contents of those locations when the container is subsequently
|
||||
// committed.
|
||||
CompatBuiltinVolumes types.OptionalBool
|
||||
}
|
||||
|
||||
// RunMountArtifacts are the artifacts created when using a run mount.
|
||||
|
|
|
@ -39,6 +39,7 @@ import (
|
|||
netUtil "github.com/containers/common/libnetwork/util"
|
||||
"github.com/containers/common/pkg/config"
|
||||
"github.com/containers/common/pkg/subscriptions"
|
||||
"github.com/containers/image/v5/types"
|
||||
imageTypes "github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
|
@ -1304,7 +1305,7 @@ func init() {
|
|||
}
|
||||
|
||||
// If this succeeds, the caller must call cleanupMounts().
|
||||
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, runFileMounts []string, runMountInfo runMountInfo) (*runMountArtifacts, error) {
|
||||
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes []string, compatBuiltinVolumes types.OptionalBool, volumeMounts []string, runFileMounts []string, runMountInfo runMountInfo) (*runMountArtifacts, error) {
|
||||
// Start building a new list of mounts.
|
||||
var mounts []specs.Mount
|
||||
haveMount := func(destination string) bool {
|
||||
|
@ -1374,7 +1375,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
|
|||
}()
|
||||
// Add temporary copies of the contents of volume locations at the
|
||||
// volume locations, unless we already have something there.
|
||||
builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID))
|
||||
builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, compatBuiltinVolumes, int(rootUID), int(rootGID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1411,17 +1412,31 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
|
|||
return mountArtifacts, nil
|
||||
}
|
||||
|
||||
func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
|
||||
func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, compatBuiltinVolumes types.OptionalBool, rootUID, rootGID int) ([]specs.Mount, error) {
|
||||
var mounts []specs.Mount
|
||||
hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
|
||||
// Add temporary copies of the contents of volume locations at the
|
||||
// volume locations, unless we already have something there.
|
||||
for _, volume := range builtinVolumes {
|
||||
volumePath := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex())
|
||||
initializeVolume := false
|
||||
// Make sure the volume exists in the rootfs.
|
||||
createDirPerms := os.FileMode(0o755)
|
||||
err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, volume), copier.MkdirOptions{
|
||||
ChownNew: &hostOwner,
|
||||
ChmodNew: &createDirPerms,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ensuring volume path %q: %w", filepath.Join(mountPoint, volume), err)
|
||||
}
|
||||
// If we're not being asked to bind mount anonymous volumes
|
||||
// onto the volume paths, we're done here.
|
||||
if compatBuiltinVolumes != types.OptionalBoolTrue {
|
||||
continue
|
||||
}
|
||||
// If we need to, create the directory that we'll use to hold
|
||||
// the volume contents. If we do need to create it, then we'll
|
||||
// need to populate it, too, so make a note of that.
|
||||
volumePath := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex())
|
||||
initializeVolume := false
|
||||
if err := fileutils.Exists(volumePath); err != nil {
|
||||
if !errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, err
|
||||
|
@ -1435,15 +1450,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
|
|||
}
|
||||
initializeVolume = true
|
||||
}
|
||||
// Make sure the volume exists in the rootfs and read its attributes.
|
||||
createDirPerms := os.FileMode(0755)
|
||||
err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, volume), copier.MkdirOptions{
|
||||
ChownNew: &hostOwner,
|
||||
ChmodNew: &createDirPerms,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ensuring volume path %q: %w", filepath.Join(mountPoint, volume), err)
|
||||
}
|
||||
// Read the attributes of the volume's location in the rootfs.
|
||||
srcPath, err := copier.Eval(mountPoint, filepath.Join(mountPoint, volume), copier.EvalOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("evaluating path %q: %w", srcPath, err)
|
||||
|
|
|
@ -259,7 +259,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
|||
SystemContext: options.SystemContext,
|
||||
}
|
||||
|
||||
runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
|
||||
runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, options.CompatBuiltinVolumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving mountpoints for container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
|
|
|
@ -475,7 +475,7 @@ rootless=%d
|
|||
SystemContext: options.SystemContext,
|
||||
}
|
||||
|
||||
runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
|
||||
runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, options.CompatBuiltinVolumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving mountpoints for container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
|
|
|
@ -2466,7 +2466,7 @@ _EOF
|
|||
|
||||
_prefetch alpine
|
||||
target=volume-image
|
||||
run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/preserve-volumes
|
||||
run_buildah build $WITH_POLICY_JSON -t ${target} --compat-volumes $BUDFILES/preserve-volumes
|
||||
run_buildah from --quiet ${target}
|
||||
cid=$output
|
||||
run_buildah mount ${cid}
|
||||
|
@ -2699,7 +2699,7 @@ function validate_instance_compression {
|
|||
|
||||
_prefetch alpine
|
||||
target=volume-image
|
||||
run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/volume-perms
|
||||
run_buildah build $WITH_POLICY_JSON -t ${target} --compat-volumes=true $BUDFILES/volume-perms
|
||||
run_buildah from --quiet $WITH_POLICY_JSON ${target}
|
||||
cid=$output
|
||||
run_buildah mount ${cid}
|
||||
|
|
Loading…
Reference in New Issue