internal/volume.GetBindMount(): discard writes in bind mounts

When handling RUN --mount=type=bind, where the mount is read-write,
instead of a simple bind mount, create an overlay mount with an upper
directory that will be discarded after the overlay mount is unmounted.
This brings us in line with the expected behavior, wherein writes to
bind mounts should be discarded.
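
At its core, the change swaps the bind mount for a kernel overlay mount whose
lower layer is the bind source and whose upper and work layers live in a
temporary directory. A minimal standalone sketch of that idea (buildah's
pkg/overlay helpers do the real work in the change below; the helper name and
paths here are hypothetical):

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// mountDiscardableBind mounts an overlay at target whose lower layer is
// source and whose upper layer is a throwaway directory under tmpDir, so
// writes land in the upper layer and vanish when it is deleted.
func mountDiscardableBind(source, target, tmpDir string) (cleanup func() error, err error) {
	upper := filepath.Join(tmpDir, "upper") // collects writes
	work := filepath.Join(tmpDir, "work")   // scratch space overlayfs requires
	for _, dir := range []string{upper, work} {
		if err := os.MkdirAll(dir, 0o700); err != nil {
			return nil, err
		}
	}
	data := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", source, upper, work)
	if err := unix.Mount("overlay", target, "overlay", 0, data); err != nil {
		return nil, fmt.Errorf("mounting overlay at %q: %w", target, err)
	}
	return func() error {
		// Unmounting and removing tmpDir discards everything written
		// through the mount.
		if err := unix.Unmount(target, 0); err != nil {
			return err
		}
		return os.RemoveAll(tmpDir)
	}, nil
}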

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Signed-off-by: Vivek Naruka <vnaruka@redhat.com>
Nalin Dahyabhai 2024-11-06 17:22:49 -05:00 committed by Chris Evich
parent b9b2969bb6
commit 432cf3cf6b
11 changed files with 349 additions and 152 deletions

View File

@@ -8,7 +8,9 @@ import (
"github.com/containers/buildah"
internalParse "github.com/containers/buildah/internal/parse"
internalUtil "github.com/containers/buildah/internal/util"
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -101,6 +103,16 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
return errors.New("command must be specified")
}
tmpDir, err := os.MkdirTemp(internalUtil.GetTempDir(), "buildahvolume")
if err != nil {
return fmt.Errorf("creating temporary directory: %w", err)
}
defer func() {
if err := os.Remove(tmpDir); err != nil {
logrus.Debugf("removing should-be-empty temporary directory %q: %v", tmpDir, err)
}
}()
store, err := getStore(c)
if err != nil {
return err
@@ -168,14 +180,23 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
if err != nil {
return fmt.Errorf("building system context: %w", err)
}
mounts, mountedImages, targetLocks, err := internalParse.GetVolumes(systemContext, store, iopts.volumes, iopts.mounts, iopts.contextDir, iopts.workingDir)
mounts, mountedImages, _, targetLocks, err := internalParse.GetVolumes(systemContext, store, builder.MountLabel, iopts.volumes, iopts.mounts, iopts.contextDir, iopts.workingDir, tmpDir)
if err != nil {
return err
}
defer internalParse.UnlockLockArray(targetLocks)
defer func() {
if err := overlay.CleanupContent(tmpDir); err != nil {
logrus.Debugf("unmounting overlay mounts under %q: %v", tmpDir, err)
}
for _, mountedImage := range mountedImages {
if _, err := store.UnmountImage(mountedImage, false); err != nil {
logrus.Debugf("unmounting image %q: %v", mountedImage, err)
}
}
// unlock any locked files from this RUN statement
internalParse.UnlockLockArray(targetLocks)
}()
options.Mounts = mounts
// Run() will automatically clean them up.
options.ExternalImageMounts = mountedImages
options.CgroupManager = globalFlagResults.CgroupManager
runerr := builder.Run(args, options)

View File

@@ -99,7 +99,7 @@ BUILDAH\_ISOLATION environment variable. `export BUILDAH_ISOLATION=oci`
Attach a filesystem mount to the container
Current supported mount TYPES are bind, cache, secret and tmpfs. <sup>[[1]](#Footnote1)</sup>
Current supported mount TYPES are bind, cache, secret and tmpfs. Writes to `bind` and `tmpfs` mounts are discarded after the command finishes, while changes to `cache` mounts persist across uses. <sup>[[1]](#Footnote1)</sup>
e.g.
@@ -111,11 +111,11 @@ Current supported mount TYPES are bind, cache, secret and tmpfs. <sup>[[1]](#Foo
Common Options:
· src, source: mount source spec for bind and volume. Mandatory for bind. If `from` is specified, `src` is the subpath in the `from` field.
· src, source: mount source spec for bind and cache. Mandatory for bind. If `from` is specified, `src` is the subpath in the `from` field.
· dst, destination, target: mount destination spec.
· dst, destination, target: location where the command being run should see the content being mounted.
· ro, read-only: true or false (default).
· ro, read-only: (default true for `type=bind`, false for `type=tmpfs`, `type=cache`).
Options specific to bind:
@@ -123,7 +123,7 @@ Current supported mount TYPES are bind, cache, secret and tmpfs. <sup>[[1]](#Foo
· bind-nonrecursive: do not set up a recursive bind mount. By default it is recursive.
· from: stage or image name for the root of the source. Defaults to the build context.
· from: image name for the root of the source. Defaults to **--contextdir**; mandatory if **--contextdir** was not specified.
· z: Set shared SELinux label on mounted destination. Use if SELinux is enabled on host machine.
@@ -143,7 +143,7 @@ Current supported mount TYPES are bind, cache, secret and tmpfs. <sup>[[1]](#Foo
Options specific to cache:
· id: Create a separate cache directory for a particular id.
· id: Distinguish this cache from other caches using this ID rather than the target mount path.
· mode: File mode for new cache directory in octal. Default 0755.
@@ -157,6 +157,9 @@ Current supported mount TYPES are bind, cache, secret and tmpfs. <sup>[[1]](#Foo
· z: Set shared SELinux label on mounted destination. Enabled by default if SELinux is enabled on the host machine.
· sharing: Whether other users of this cache need to wait for this command to complete (`sharing=locked`) or not (`sharing=shared`, which is the default).
· Z: Set private SELinux label on mounted destination. Use if SELinux is enabled on host machine.
**--network**, **--net**=*mode*

View File

@@ -11,10 +11,11 @@ import (
"errors"
"github.com/containers/buildah/copier"
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/common/pkg/parse"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
@@ -24,6 +25,7 @@ import (
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
)
const (
@@ -49,16 +51,76 @@ var (
errDuplicateDest = errors.New("duplicate mount destination")
)
func mountIsReadWrite(m specs.Mount) bool {
// in case of conflicts, the last one wins, so it's not enough
// to check for the presence of either "rw" or "ro" anywhere
// with e.g. slices.Contains()
rw := true
for _, option := range m.Options {
switch option {
case "rw":
rw = true
case "ro":
rw = false
}
}
return rw
}
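// As a quick illustration of the last-one-wins rule above, with a
// hypothetical options list:
//
//	m := specs.Mount{Options: []string{"ro", "nosuid", "rw"}}
//	mountIsReadWrite(m) // true: the final "rw" overrides the earlier "ro"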
func convertToOverlay(m specs.Mount, store storage.Store, mountLabel, tmpDir string, uid, gid int) (specs.Mount, string, error) {
overlayDir, err := overlay.TempDir(tmpDir, uid, gid)
if err != nil {
return specs.Mount{}, "", fmt.Errorf("setting up overlay for %q: %w", m.Destination, err)
}
graphOptions := store.GraphOptions()
graphOptsCopy := make([]string, len(graphOptions))
copy(graphOptsCopy, graphOptions)
options := overlay.Options{GraphOpts: graphOptsCopy, ForceMount: true, MountLabel: mountLabel}
fileInfo, err := os.Stat(m.Source)
if err != nil {
return specs.Mount{}, "", fmt.Errorf("setting up overlay of %q: %w", m.Source, err)
}
// we might be trying to "overlay" for a non-directory, and the kernel doesn't like that very much
var mountThisInstead specs.Mount
if fileInfo.IsDir() {
// do the normal thing of mounting this directory as a lower with a temporary upper
mountThisInstead, err = overlay.MountWithOptions(overlayDir, m.Source, m.Destination, &options)
if err != nil {
return specs.Mount{}, "", fmt.Errorf("setting up overlay of %q: %w", m.Source, err)
}
} else {
// mount the parent directory as the lower with a temporary upper, and return a
// bind mount from the non-directory in the merged directory to the destination
sourceDir := filepath.Dir(m.Source)
sourceBase := filepath.Base(m.Source)
destination := m.Destination
mountedOverlay, err := overlay.MountWithOptions(overlayDir, sourceDir, destination, &options)
if err != nil {
return specs.Mount{}, "", fmt.Errorf("setting up overlay of %q: %w", sourceDir, err)
}
if mountedOverlay.Type != define.TypeBind {
if err2 := overlay.RemoveTemp(overlayDir); err2 != nil {
return specs.Mount{}, "", fmt.Errorf("cleaning up after failing to set up overlay: %v, while setting up overlay for %q: %w", err2, destination, err)
}
return specs.Mount{}, "", fmt.Errorf("setting up overlay for %q at %q: %w", mountedOverlay.Source, destination, err)
}
mountThisInstead = mountedOverlay
mountThisInstead.Source = filepath.Join(mountedOverlay.Source, sourceBase)
mountThisInstead.Destination = destination
}
return mountThisInstead, overlayDir, nil
}
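// The caller-side contract, as a hedged sketch: the overlayDir returned by
// convertToOverlay must eventually be handed to overlay.RemoveTemp, which
// unmounts the overlay and deletes its temporary upper directory, discarding
// whatever the command wrote through the mount. Apart from convertToOverlay
// and overlay.RemoveTemp, the names below are placeholders:
//
//	converted, overlayDir, err := convertToOverlay(m, store, mountLabel, tmpDir, 0, 0)
//	if err != nil {
//		return err
//	}
//	defer func() {
//		if err := overlay.RemoveTemp(overlayDir); err != nil {
//			logrus.Debugf("unmounting overlay mount at %q: %v", overlayDir, err)
//		}
//	}()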
// GetBindMount parses a single bind mount entry from the --mount flag.
// Returns the parsed mount, the name of the image that was mounted (empty if
// no image was mounted), and the path of any overlay directory that was set up
// for a read-write mount. The caller is expected to unmount any mounted image
// and to clean up any returned overlay directory.
func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, string, error) {
func GetBindMount(sys *types.SystemContext, args []string, contextDir string, store storage.Store, mountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir, tmpDir string) (specs.Mount, string, string, error) {
newMount := specs.Mount{
Type: define.TypeBind,
}
mountReadability := false
setDest := false
mountReadability := ""
setDest := ""
bindNonRecursive := false
fromImage := ""
@@ -71,64 +133,69 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
case "bind-nonrecursive":
newMount.Options = append(newMount.Options, "bind")
bindNonRecursive = true
case "ro", "nosuid", "nodev", "noexec":
case "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
mountReadability = true
case "rw", "readwrite":
newMount.Options = append(newMount.Options, "rw")
mountReadability = true
case "readonly":
// Alias for "ro"
mountReadability = "rw"
case "ro", "readonly":
newMount.Options = append(newMount.Options, "ro")
mountReadability = true
mountReadability = "ro"
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
newMount.Options = append(newMount.Options, kv[0])
case "from":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
return newMount, "", "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
fromImage = kv[1]
case "bind-propagation":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
return newMount, "", "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
switch kv[1] {
default:
return newMount, "", "", fmt.Errorf("%v: %q: %w", kv[0], kv[1], errBadMntOption)
case "shared", "rshared", "private", "rprivate", "slave", "rslave":
// this should be the relevant parts of the same list of options we accepted above
}
newMount.Options = append(newMount.Options, kv[1])
case "src", "source":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
return newMount, "", "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
newMount.Source = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
return newMount, "", "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
}
targetPath := kv[1]
setDest = targetPath
if !path.IsAbs(targetPath) {
targetPath = filepath.Join(workDir, targetPath)
}
if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
return newMount, "", err
return newMount, "", "", err
}
newMount.Destination = targetPath
setDest = true
case "consistency":
// Option for OS X only, has no meaning on other platforms
// and can thus be safely ignored.
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
default:
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption)
return newMount, "", "", fmt.Errorf("%v: %w", kv[0], errBadMntOption)
}
}
// default mount readability is always readonly
if !mountReadability {
if mountReadability == "" {
newMount.Options = append(newMount.Options, "ro")
}
// Following variable ensures that we return imagename only if we did additional mount
isImageMounted := false
succeeded := false
mountedImage := ""
if fromImage != "" {
mountPoint := ""
if additionalMountPoints != nil {
@@ -139,16 +206,23 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
// if mountPoint of image was not found in additionalMap
// or additionalMap was nil, try mounting image
if mountPoint == "" {
image, err := internalUtil.LookupImage(ctx, store, fromImage)
image, err := internalUtil.LookupImage(sys, store, fromImage)
if err != nil {
return newMount, "", err
return newMount, "", "", err
}
mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel)
mountPoint, err = image.Mount(context.Background(), nil, mountLabel)
if err != nil {
return newMount, "", err
return newMount, "", "", err
}
isImageMounted = true
mountedImage = image.ID()
defer func() {
if !succeeded {
if _, err := store.UnmountImage(mountedImage, false); err != nil {
logrus.Debugf("unmounting bind-mounted image %q: %v", fromImage, err)
}
}
}()
}
contextDir = mountPoint
}
@@ -159,42 +233,45 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
newMount.Options = append(newMount.Options, "rbind")
}
if !setDest {
return newMount, fromImage, errBadVolDest
if setDest == "" {
return newMount, "", "", errBadVolDest
}
// buildkit parity: support absolute path for sources from current build context
if contextDir != "" {
// path should be /contextDir/specified path
evaluated, err := copier.Eval(contextDir, string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
evaluated, err := copier.Eval(contextDir, contextDir+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
if err != nil {
return newMount, "", err
return newMount, "", "", err
}
newMount.Source = evaluated
} else {
// looks like it's coming from `build run --mount=type=bind`; allow using an absolute path
// error out if no source is set
if newMount.Source == "" {
return newMount, "", errBadVolSrc
return newMount, "", "", errBadVolSrc
}
if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
return newMount, "", err
return newMount, "", "", err
}
}
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, fromImage, err
return newMount, "", "", err
}
newMount.Options = opts
if !isImageMounted {
// we don't want any cleanups if image was not mounted explicitly
// so dont return anything
fromImage = ""
overlayDir := ""
if mountedImage != "" || mountIsReadWrite(newMount) {
if newMount, overlayDir, err = convertToOverlay(newMount, store, mountLabel, tmpDir, 0, 0); err != nil {
return newMount, "", "", err
}
}
return newMount, fromImage, nil
succeeded = true
return newMount, mountedImage, overlayDir, nil
}
// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it.
@@ -341,8 +418,8 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
if mountPoint == "" {
return newMount, nil, fmt.Errorf("no stage or additional build context found with name %s", fromStage)
}
// path should be /contextDir/specified path
evaluated, err := copier.Eval(mountPoint, string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
// path should be /mountPoint/specified path
evaluated, err := copier.Eval(mountPoint, mountPoint+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
if err != nil {
return newMount, nil, err
}
@@ -523,34 +600,35 @@ func Volume(volume string) (specs.Mount, error) {
return mount, nil
}
// UnlockLockArray is a helper for cleaning up after GetVolumes and the like.
func UnlockLockArray(locks []*lockfile.LockFile) {
for _, lock := range locks {
lock.Unlock()
}
}
// GetVolumes gets the volumes from --volume and --mount
//
// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string, workDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) {
unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir, workDir)
func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string, volumes []string, mounts []string, contextDir, workDir, tmpDir string) ([]specs.Mount, []string, []string, []*lockfile.LockFile, error) {
unifiedMounts, mountedImages, overlayDirs, targetLocks, err := getMounts(ctx, store, mountLabel, mounts, contextDir, workDir, tmpDir)
if err != nil {
return nil, mountedImages, nil, err
return nil, nil, nil, nil, err
}
succeeded := false
defer func() {
if !succeeded {
for _, overlayDir := range overlayDirs {
if err := overlay.RemoveTemp(overlayDir); err != nil {
logrus.Debugf("unmounting overlay mount at %q: %v", overlayDir, err)
}
}
for _, mountedImage := range mountedImages {
if _, err := store.UnmountImage(mountedImage, false); err != nil {
logrus.Debugf("unmounting image %q: %v", mountedImage, err)
}
}
UnlockLockArray(targetLocks)
}
}()
volumeMounts, err := getVolumeMounts(volumes)
if err != nil {
return nil, mountedImages, nil, err
return nil, nil, nil, nil, err
}
for dest, mount := range volumeMounts {
if _, ok := unifiedMounts[dest]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
return nil, nil, nil, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
}
unifiedMounts[dest] = mount
}
@@ -560,24 +638,33 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string,
finalMounts = append(finalMounts, mount)
}
succeeded = true
return finalMounts, mountedImages, targetLocks, nil
return finalMounts, mountedImages, overlayDirs, targetLocks, nil
}
// getMounts takes user-provided input from the --mount flag and creates OCI
// spec mounts.
// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
// buildah run --mount type=tmpfs,target=/dev/shm ...
//
// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string, workDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) {
func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string, mounts []string, contextDir, workDir, tmpDir string) (map[string]specs.Mount, []string, []string, []*lockfile.LockFile, error) {
// If `type` is not set default to "bind"
mountType := define.TypeBind
finalMounts := make(map[string]specs.Mount)
mountedImages := make([]string, 0)
finalMounts := make(map[string]specs.Mount, len(mounts))
mountedImages := make([]string, 0, len(mounts))
overlayDirs := make([]string, 0, len(mounts))
targetLocks := make([]*lockfile.LockFile, 0)
succeeded := false
defer func() {
if !succeeded {
for _, overlayDir := range overlayDirs {
if err := overlay.RemoveTemp(overlayDir); err != nil {
logrus.Debugf("unmounting overlay mount at %q: %v", overlayDir, err)
}
}
for _, mountedImage := range mountedImages {
if _, err := store.UnmountImage(mountedImage, false); err != nil {
logrus.Debugf("unmounting image %q: %v", mountedImage, err)
}
}
UnlockLockArray(targetLocks)
}
}()
@@ -590,56 +677,61 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
for _, mount := range mounts {
tokens := strings.Split(mount, ",")
if len(tokens) < 2 {
return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
return nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
}
for _, field := range tokens {
if strings.HasPrefix(field, "type=") {
kv := strings.Split(field, "=")
if len(kv) != 2 {
return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
return nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
}
mountType = kv[1]
}
}
switch mountType {
case define.TypeBind:
mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir)
mount, image, overlayDir, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir, tmpDir)
if err != nil {
return nil, mountedImages, nil, err
return nil, nil, nil, nil, err
}
if image != "" {
mountedImages = append(mountedImages, image)
}
if overlayDir != "" {
overlayDirs = append(overlayDirs, overlayDir)
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
return nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
mountedImages = append(mountedImages, image)
case TypeCache:
mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir)
if err != nil {
return nil, mountedImages, nil, err
return nil, nil, nil, nil, err
}
if tl != nil {
targetLocks = append(targetLocks, tl)
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
return nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
case TypeTmpfs:
mount, err := GetTmpfsMount(tokens)
if err != nil {
return nil, mountedImages, nil, err
return nil, nil, nil, nil, err
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
return nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
}
finalMounts[mount.Destination] = mount
default:
return nil, mountedImages, nil, fmt.Errorf("invalid filesystem type %q", mountType)
return nil, nil, nil, nil, fmt.Errorf("invalid filesystem type %q", mountType)
}
}
succeeded = true
return finalMounts, mountedImages, targetLocks, nil
return finalMounts, mountedImages, overlayDirs, targetLocks, nil
}
// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
@@ -697,3 +789,10 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
return newMount, nil
}
// UnlockLockArray is a helper for cleaning up after GetVolumes and the like.
func UnlockLockArray(locks []*lockfile.LockFile) {
for _, lock := range locks {
lock.Unlock()
}
}

View File

@@ -16,7 +16,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)

run.go
View File

@@ -167,17 +167,19 @@ type RunOptions struct {
// RunMountArtifacts are the artifacts created when using a run mount.
type runMountArtifacts struct {
// RunMountTargets are the run mount targets inside the container
// RunMountTargets are the run mount targets inside the container which should be removed
RunMountTargets []string
// RunOverlayDirs are overlay directories which will need to be cleaned up using overlay.RemoveTemp()
RunOverlayDirs []string
// TmpFiles are artifacts that need to be removed outside the container
TmpFiles []string
// Any external images which were mounted inside container
// Any images which were mounted, which should be unmounted
MountedImages []string
// Agents are the ssh agents started
// Agents are the ssh agents started, which should have their Shutdown() methods called
Agents []*sshagent.AgentServer
// SSHAuthSock is the path to the ssh auth sock inside the container
SSHAuthSock string
// TargetLocks to be unlocked if there are any.
// Lock files, which should have their Unlock() methods called
TargetLocks []*lockfile.LockFile
}

View File

@@ -27,7 +27,6 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
internalParse "github.com/containers/buildah/internal/parse"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/buildah/util"
@@ -44,7 +43,6 @@ import (
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/unshare"
storageTypes "github.com/containers/storage/types"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -1269,8 +1267,10 @@ func init() {
reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain)
}
// If this succeeds, the caller must call cleanupMounts().
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, runFileMounts []string, runMountInfo runMountInfo) (*runMountArtifacts, error) {
// If this succeeds, after the command which uses the spec finishes running,
// the caller must call b.cleanupRunMounts() on the returned runMountArtifacts
// structure.
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes []string, volumeMounts []string, runFileMounts []string, runMountInfo runMountInfo) (*runMountArtifacts, error) {
// Start building a new list of mounts.
var mounts []specs.Mount
haveMount := func(destination string) bool {
@@ -1328,14 +1328,16 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
processGID: int(processGID),
}
// Get the list of mounts that are just for this Run() call.
runMounts, mountArtifacts, err := b.runSetupRunMounts(runFileMounts, runMountInfo, idMaps)
runMounts, mountArtifacts, err := b.runSetupRunMounts(bundlePath, runFileMounts, runMountInfo, idMaps)
if err != nil {
return nil, err
}
succeeded := false
defer func() {
if !succeeded {
internalParse.UnlockLockArray(mountArtifacts.TargetLocks)
if err := b.cleanupRunMounts(mountPoint, mountArtifacts); err != nil {
b.Logger.Debugf("cleaning up run mounts: %v", err)
}
}
}()
// Add temporary copies of the contents of volume locations at the
@@ -1464,27 +1466,50 @@ func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
return mountDest
}
// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs
//
// If this function succeeds, the caller must unlock runMountArtifacts.TargetLocks (when??)
func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
// If this function succeeds, the caller must free the returned
// runMountArtifacts by calling b.cleanupRunMounts() after the command being
// executed with those mounts has finished.
func (b *Builder) runSetupRunMounts(bundlePath string, mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
// If `type` is not set default to TypeBind
mountType := define.TypeBind
mountTargets := make([]string, 0, 10)
mountTargets := make([]string, 0, len(mounts))
tmpFiles := make([]string, 0, len(mounts))
mountImages := make([]string, 0, 10)
mountImages := make([]string, 0, len(mounts))
finalMounts := make([]specs.Mount, 0, len(mounts))
agents := make([]*sshagent.AgentServer, 0, len(mounts))
sshCount := 0
defaultSSHSock := ""
targetLocks := []*lockfile.LockFile{}
var overlayDirs []string
succeeded := false
defer func() {
if !succeeded {
for _, agent := range agents {
servePath := agent.ServePath()
if err := agent.Shutdown(); err != nil {
b.Logger.Errorf("shutting down SSH agent at %q: %v", servePath, err)
}
}
for _, overlayDir := range overlayDirs {
if err := overlay.RemoveTemp(overlayDir); err != nil {
b.Logger.Error(err.Error())
}
}
for _, mountImage := range mountImages {
if _, err := b.store.UnmountImage(mountImage, false); err != nil {
b.Logger.Error(err.Error())
}
}
for _, tmpFile := range tmpFiles {
if err := os.Remove(tmpFile); err != nil && !errors.Is(err, os.ErrNotExist) {
b.Logger.Error(err.Error())
}
}
internalParse.UnlockLockArray(targetLocks)
}
}()
for _, mount := range mounts {
var err error
var bundleMountsDir string
tokens := strings.Split(mount, ",")
for _, field := range tokens {
if strings.HasPrefix(field, "type=") {
@@ -1509,31 +1534,34 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
}
}
case "ssh":
mount, agent, err := b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps)
mount, agent, err := b.getSSHMount(tokens, len(agents), sources.SSHSources, idMaps)
if err != nil {
return nil, nil, err
}
if mount != nil {
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
agents = append(agents, agent)
if sshCount == 0 {
if len(agents) == 0 {
defaultSSHSock = mount.Destination
}
// Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i}
sshCount++
agents = append(agents, agent)
}
case define.TypeBind:
mount, image, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir)
if bundleMountsDir, err = os.MkdirTemp(bundlePath, "mounts"); err != nil {
return nil, nil, err
}
mount, image, overlayDir, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir, bundleMountsDir)
if err != nil {
return nil, nil, err
}
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
// only perform cleanup if an image was mounted; ignore everything else
if image != "" {
mountImages = append(mountImages, image)
}
if overlayDir != "" {
overlayDirs = append(overlayDirs, overlayDir)
}
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
case "tmpfs":
mount, err := b.getTmpfsMount(tokens, idMaps)
if err != nil {
@@ -1546,11 +1574,11 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
if err != nil {
return nil, nil, err
}
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
if tl != nil {
targetLocks = append(targetLocks, tl)
}
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
default:
return nil, nil, fmt.Errorf("invalid mount type %q", mountType)
}
@@ -1558,6 +1586,7 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
succeeded = true
artifacts := &runMountArtifacts{
RunMountTargets: mountTargets,
RunOverlayDirs: overlayDirs,
TmpFiles: tmpFiles,
Agents: agents,
MountedImages: mountImages,
@@ -1567,21 +1596,21 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
return finalMounts, artifacts, nil
}
func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*spec.Mount, string, error) {
func (b *Builder) getBindMount(tokens []string, sys *imageTypes.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir, tmpDir string) (*spec.Mount, string, string, error) {
if contextDir == "" {
return nil, "", errors.New("Context Directory for current run invocation is not configured")
return nil, "", "", errors.New("context directory for current run invocation is not configured")
}
var optionMounts []specs.Mount
mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir)
mount, image, overlayDir, err := internalParse.GetBindMount(sys, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir, tmpDir)
if err != nil {
return nil, image, err
return nil, image, overlayDir, err
}
optionMounts = append(optionMounts, mount)
volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
if err != nil {
return nil, image, err
return nil, image, overlayDir, err
}
return &volumes[0], image, nil
return &volumes[0], image, overlayDir, nil
}
func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*spec.Mount, error) {
@@ -1850,7 +1879,7 @@ func (b *Builder) cleanupTempVolumes() {
}
// cleanupRunMounts cleans up run mounts so they only appear in this run.
func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error {
func (b *Builder) cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error {
for _, agent := range artifacts.Agents {
err := agent.Shutdown()
if err != nil {
@@ -1858,42 +1887,34 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint
}
}
//cleanup any mounted images for this run
for _, image := range artifacts.MountedImages {
if image != "" {
// if flow hits here some image was mounted for this run
i, err := internalUtil.LookupImage(context, b.store, image)
if err == nil {
// silently try to unmount and do nothing
// if image is being used by something else
_ = i.Unmount(false)
}
if errors.Is(err, storageTypes.ErrImageUnknown) {
// Ignore only if ErrImageUnknown
// Reason: Image is already unmounted do nothing
continue
}
// cleanup any overlays we mounted
for _, overlayDirectory := range artifacts.RunOverlayDirs {
if err := overlay.RemoveTemp(overlayDirectory); err != nil {
return err
}
}
// unmount any images we mounted for this run
for _, image := range artifacts.MountedImages {
if _, err := b.store.UnmountImage(image, false); err != nil {
logrus.Debugf("umounting image %q: %v", image, err)
}
}
// remove mount targets that were created for this run
opts := copier.RemoveOptions{
All: true,
}
for _, path := range artifacts.RunMountTargets {
err := copier.Remove(mountpoint, path, opts)
if err != nil {
return err
if err := copier.Remove(mountpoint, path, opts); err != nil {
return fmt.Errorf("removing mount target %q %q: %w", mountpoint, path, err)
}
}
var prevErr error
for _, path := range artifacts.TmpFiles {
err := os.Remove(path)
if !errors.Is(err, os.ErrNotExist) {
if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) {
if prevErr != nil {
logrus.Error(prevErr)
}
prevErr = err
prevErr = fmt.Errorf("removing temporary file: %w", err)
}
}
// unlock any locked files from this RUN statement

View File

@@ -231,7 +231,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
defer func() {
if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil {
if err := b.cleanupRunMounts(mountPoint, runArtifacts); err != nil {
options.Logger.Errorf("unable to cleanup run mounts %v", err)
}
}()

View File

@@ -347,7 +347,7 @@ rootless=%d
}
defer func() {
if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil {
if err := b.cleanupRunMounts(mountPoint, runArtifacts); err != nil {
options.Logger.Errorf("unable to cleanup run mounts %v", err)
}
}()
@@ -910,12 +910,15 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err)
}
graphOptions := b.store.GraphOptions()
graphOptsCopy := make([]string, len(graphOptions))
copy(graphOptsCopy, graphOptions)
overlayOpts := overlay.Options{
RootUID: idMaps.rootUID,
RootGID: idMaps.rootGID,
UpperDirOptionFragment: upperDir,
WorkDirOptionFragment: workDir,
GraphOpts: b.store.GraphOptions(),
GraphOpts: graphOptsCopy,
}
overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts)
@@ -924,7 +927,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
}
// If chown true, add correct ownership to the overlay temp directories.
if foundU {
if err == nil && foundU {
if err := chown.ChangeHostPathOwnership(contentDir, true, idMaps.processUID, idMaps.processGID); err != nil {
return specs.Mount{}, err
}

View File

@@ -5981,4 +5981,34 @@ EOF
run_buildah 1 build --security-opt label=disable --build-context testbuild=${TEST_SCRATCH_DIR}/cve20249675/ --no-cache ${TEST_SCRATCH_DIR}/cve20249675/
expect_output --substring "cat: can't open '/var/tmp/file.txt': No such file or directory"
mkdir ${TEST_SCRATCH_DIR}/cachedir
run_buildah 1 build --security-opt label=disable --build-context testbuild=${TEST_SCRATCH_DIR}/cachedir/ --no-cache ${TEST_SCRATCH_DIR}/cve20249675/
expect_output --substring "cat: can't open '/var/tmp/file.txt': No such file or directory"
}
@test "build-mounts-build-context-rw" {
zflag=
if which selinuxenabled > /dev/null 2> /dev/null ; then
if selinuxenabled ; then
zflag=,z
fi
fi
base=busybox
_prefetch $base
mkdir -p ${TEST_SCRATCH_DIR}/buildcontext
cat > ${TEST_SCRATCH_DIR}/buildcontext/Dockerfile << EOF
FROM $base
RUN --mount=type=bind,dst=/dst,source=/,rw${zflag} \
mkdir /dst/subdir ; \
chown 1000:1000 /dst/subdir ; \
chmod 777 /dst/subdir ; \
touch /dst/subdir/file-suid ; \
chmod 4777 /dst/subdir/file-suid
EOF
run_buildah build ${TEST_SCRATCH_DIR}/buildcontext
run find ${TEST_SCRATCH_DIR}/buildcontext -name file-suid -ls
find ${TEST_SCRATCH_DIR}/buildcontext -ls
expect_output "" "build should not be able to write to build context"
}

View File

@@ -0,0 +1,8 @@
FROM alpine AS base
RUN touch -r /etc/os-release /1.txt
FROM alpine AS interloper
RUN --mount=type=bind,from=base,source=/,destination=/base,rw touch -r /etc/os-release /base/2.txt
FROM interloper
RUN --mount=type=bind,from=base,source=/,destination=/base mkdir /base2 && cp -a /base/*.txt /base2/ && touch -r /etc/os-release /base2 && ls -la /base2

View File

@@ -412,13 +412,25 @@ function configure_and_check_user() {
mkdir -p ${TEST_SCRATCH_DIR}/was:empty
# As a baseline, this should succeed.
run_buildah run --mount type=tmpfs,dst=/var/tmpfs-not-empty $cid touch /var/tmpfs-not-empty/testfile
# This should succeed, but the writes should effectively be discarded
run_buildah run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty,dst=/var/not-empty,rw${zflag:+,${zflag}} $cid touch /var/not-empty/testfile
if test -r ${TEST_SCRATCH_DIR}/was:empty/testfile ; then
die write to mounted type=bind was not discarded, ${TEST_SCRATCH_DIR}/was:empty/testfile exists
fi
# If we're parsing the options at all, this should be read-only, so it should fail.
run_buildah 1 run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty,dst=/var/not-empty,ro${zflag:+,${zflag}} $cid touch /var/not-empty/testfile
# Even if the parent directory doesn't exist yet, this should succeed.
run_buildah run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty,dst=/var/multi-level/subdirectory,rw $cid touch /var/multi-level/subdirectory/testfile
# And check the same for file volumes.
run_buildah run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty/testfile,dst=/var/different-multi-level/subdirectory/testfile,rw $cid touch /var/different-multi-level/subdirectory/testfile
# Even if the parent directory doesn't exist yet, this should succeed, but again the write should be discarded.
run_buildah run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty,dst=/var/multi-level/subdirectory,rw${zflag:+,${zflag}} $cid touch /var/multi-level/subdirectory/testfile
if test -r ${TEST_SCRATCH_DIR}/was:empty/testfile ; then
die write to mounted type=bind was not discarded, ${TEST_SCRATCH_DIR}/was:empty/testfile exists
fi
# And check the same for file volumes, which make life harder because the kernel's overlay
# filesystem really only wants to be dealing with directories.
: > ${TEST_SCRATCH_DIR}/was:empty/testfile
run_buildah run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty/testfile,dst=/var/different-multi-level/subdirectory/testfile,rw${zflag:+,${zflag}} $cid sh -c 'echo wrote > /var/different-multi-level/subdirectory/testfile'
if test -s ${TEST_SCRATCH_DIR}/was:empty/testfile ; then
die write to mounted type=bind was not discarded, ${TEST_SCRATCH_DIR}/was:empty/testfile was written to
fi
}
@test "run --mount=type=bind with from like buildkit" {
@@ -426,16 +438,15 @@ function configure_and_check_user() {
zflag=
if which selinuxenabled > /dev/null 2> /dev/null ; then
if selinuxenabled ; then
skip "skip if selinux enabled, since stages have different selinux label"
zflag=,z
fi
fi
run_buildah build -t buildkitbase $WITH_POLICY_JSON -f $BUDFILES/buildkit-mount-from/Dockerfilebuildkitbase $BUDFILES/buildkit-mount-from/
_prefetch alpine
run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
cid=$output
run_buildah run --mount type=bind,source=.,from=buildkitbase,target=/test,z $cid cat /test/hello
run_buildah run --mount type=bind,source=.,from=buildkitbase,target=/test${zflag} $cid cat /test/hello
expect_output --substring "hello"
run_buildah rmi -f buildkitbase
}
@test "run --mount=type=cache like buildkit" {
@@ -443,14 +454,14 @@ function configure_and_check_user() {
zflag=
if which selinuxenabled > /dev/null 2> /dev/null ; then
if selinuxenabled ; then
skip "skip if selinux enabled, since stages have different selinux label"
zflag=,z
fi
fi
_prefetch alpine
run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
cid=$output
run_buildah run --mount type=cache,target=/test,z $cid sh -c 'echo "hello" > /test/hello && cat /test/hello'
run_buildah run --mount type=cache,target=/test,z $cid cat /test/hello
run_buildah run --mount type=cache,target=/test${zflag} $cid sh -c 'echo "hello" > /test/hello && cat /test/hello'
run_buildah run --mount type=cache,target=/test${zflag} $cid cat /test/hello
expect_output --substring "hello"
}