Fix TOCTOU error when bind and cache mounts use "src" values

Fix a time-of-check/time-of-use error when mounting type=bind and
type=cache directories that use a "src" option.  A hostile writer could
use a concurrently-running stage or build to replace that "src" location
between the point when we resolved possible symbolic links and the point
when the OCI runtime (runc, crun, or whichever is in use) actually went
to create the bind mount (CVE-2024-11218).

Stop ignoring the "src" option for cache mounts when there's no "from"
option.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Author: Nalin Dahyabhai, 2024-12-13 16:55:59 -05:00
Commit: 8b46071ab5 (parent: 2c05dfd1c7)
10 changed files with 312 additions and 145 deletions
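The heart of the fix is visible in the volumes code below: after the user-supplied "src" subdirectory has been resolved, it is immediately pinned with a bind mount onto a private, throwaway mountpoint (the bindFromChroot() calls), and the runtime spec is pointed at that mountpoint instead of the original path. A concurrent writer can still rewrite the original location, but it can no longer change what the OCI runtime ends up mounting. Below is a minimal sketch of that idea, assuming a resolve-then-pin helper named PinSubdir (a hypothetical name); the real bindFromChroot() performs the resolution from inside a chroot in a helper process, which this sketch does not attempt, so a small resolve-to-mount window remains here.

package sanitize

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"golang.org/x/sys/unix"
)

// PinSubdir resolves root/subdir while we still trust the tree, refuses
// results that escape root, and then bind-mounts the resolved directory onto
// a fresh mountpoint under tmpDir.  The caller hands the returned path to the
// OCI runtime and unmounts and removes it after the command exits.
func PinSubdir(root, subdir, tmpDir string) (string, error) {
	resolved, err := filepath.EvalSymlinks(filepath.Join(root, subdir))
	if err != nil {
		return "", err
	}
	rel, err := filepath.Rel(root, resolved)
	if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
		return "", fmt.Errorf("%q escapes %q", subdir, root)
	}
	mountpoint, err := os.MkdirTemp(tmpDir, "buildah-mount-")
	if err != nil {
		return "", err
	}
	// Once the resolved directory is bind-mounted here, swapping a symlink
	// into the original path no longer affects what gets mounted into the
	// container.
	if err := unix.Mount(resolved, mountpoint, "", unix.MS_BIND, ""); err != nil {
		os.Remove(mountpoint)
		return "", fmt.Errorf("bind-mounting %q: %w", resolved, err)
	}
	return mountpoint, nil
}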


@@ -13,6 +13,7 @@ import (
 	"github.com/containers/buildah/pkg/overlay"
 	"github.com/containers/buildah/pkg/parse"
 	"github.com/containers/buildah/util"
+	"github.com/containers/storage/pkg/mount"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
@@ -184,7 +185,7 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
 	if err != nil {
 		return fmt.Errorf("building system context: %w", err)
 	}
-	mounts, mountedImages, _, targetLocks, err := volumes.GetVolumes(systemContext, store, builder.MountLabel, iopts.volumes, iopts.mounts, iopts.contextDir, iopts.workingDir, tmpDir)
+	mounts, mountedImages, intermediateMounts, _, targetLocks, err := volumes.GetVolumes(systemContext, store, builder.MountLabel, iopts.volumes, iopts.mounts, iopts.contextDir, iopts.workingDir, tmpDir)
 	if err != nil {
 		return err
 	}
@@ -192,6 +193,14 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
 		if err := overlay.CleanupContent(tmpDir); err != nil {
 			logrus.Debugf("unmounting overlay mounts under %q: %v", tmpDir, err)
 		}
+		for _, intermediateMount := range intermediateMounts {
+			if err := mount.Unmount(intermediateMount); err != nil {
+				logrus.Debugf("unmounting mount %q: %v", intermediateMount, err)
+			}
+			if err := os.Remove(intermediateMount); err != nil {
+				logrus.Debugf("removing should-be-empty mount directory %q: %v", intermediateMount, err)
+			}
+		}
 		for _, mountedImage := range mountedImages {
 			if _, err := store.UnmountImage(mountedImage, false); err != nil {
 				logrus.Debugf("unmounting image %q: %v", mountedImage, err)


@@ -23,6 +23,7 @@ import (
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/unshare"
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -117,10 +118,19 @@ func convertToOverlay(m specs.Mount, store storage.Store, mountLabel, tmpDir str
 }

 // FIXME: this code needs to be merged with pkg/parse/parse.go ValidateVolumeOpts
+//
 // GetBindMount parses a single bind mount entry from the --mount flag.
-// Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty.
-// Caller is expected to perform unmount of any mounted images
-func GetBindMount(sys *types.SystemContext, args []string, contextDir string, store storage.Store, mountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir, tmpDir string) (specs.Mount, string, string, error) {
+//
+// Returns a Mount to add to the runtime spec's list of mounts, the ID of the
+// image we mounted if we mounted one, the path of a mounted location if one
+// needs to be unmounted and removed, and the path of an overlay mount if one
+// needs to be cleaned up, or an error.
+//
+// The caller is expected to, after the command which uses the mount exits,
+// clean up the overlay filesystem (if we provided a path to it), unmount and
+// remove the mountpoint for the mounted filesystem (if we provided the path to
+// its mountpoint), and then unmount the image (if we mounted one).
+func GetBindMount(sys *types.SystemContext, args []string, contextDir string, store storage.Store, mountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir, tmpDir string) (specs.Mount, string, string, string, error) {
 	newMount := specs.Mount{
 		Type: define.TypeBind,
 	}
@@ -152,39 +162,39 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
 			mountReadability = "ro"
 		case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U", "no-dereference":
 			if hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", val, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", val, errBadOptionArg)
 			}
 			newMount.Options = append(newMount.Options, argName)
 		case "from":
 			if !hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			fromImage = argValue
 		case "bind-propagation":
 			if !hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			switch argValue {
 			default:
-				return newMount, "", "", fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
+				return newMount, "", "", "", fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
 			case "shared", "rshared", "private", "rprivate", "slave", "rslave":
 				// this should be the relevant parts of the same list of options we accepted above
 			}
 			switch argValue {
 			default:
-				return newMount, "", "", fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
+				return newMount, "", "", "", fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
 			case "shared", "rshared", "private", "rprivate", "slave", "rslave":
 				// this should be the relevant parts of the same list of options we accepted above
 			}
 			newMount.Options = append(newMount.Options, argValue)
 		case "src", "source":
 			if !hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			newMount.Source = argValue
 		case "target", "dst", "destination":
 			if !hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			targetPath := argValue
 			setDest = targetPath
@@ -192,12 +202,12 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
 				targetPath = filepath.Join(workDir, targetPath)
 			}
 			if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
-				return newMount, "", "", err
+				return newMount, "", "", "", err
 			}
 			newMount.Destination = targetPath
 		case "relabel":
 			if setRelabel != "" {
-				return newMount, "", "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg)
 			}
 			setRelabel = argValue
 			switch argValue {
@@ -206,14 +216,14 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
 			case "shared":
 				newMount.Options = append(newMount.Options, "z")
 			default:
-				return newMount, "", "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", argName, errBadMntOption)
+				return newMount, "", "", "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", argName, errBadMntOption)
 			}
 		case "consistency":
 			// Option for OS X only, has no meaning on other platforms
 			// and can thus be safely ignored.
 			// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
 		default:
-			return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadMntOption)
+			return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadMntOption)
 		}
 	}
@@ -237,12 +247,12 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
 	if mountPoint == "" {
 		image, err := internalUtil.LookupImage(sys, store, fromImage)
 		if err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}
 		mountPoint, err = image.Mount(context.Background(), nil, mountLabel)
 		if err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}
 		mountedImage = image.ID()
 		defer func() {
@@ -263,7 +273,7 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
 	}

 	if setDest == "" {
-		return newMount, "", "", errBadVolDest
+		return newMount, "", "", "", errBadVolDest
 	}

 	// buildkit parity: support absolute path for sources from current build context
@@ -271,42 +281,64 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
 		// path should be /contextDir/specified path
 		evaluated, err := copier.Eval(contextDir, contextDir+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
 		if err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}
 		newMount.Source = evaluated
 	} else {
 		// looks like its coming from `build run --mount=type=bind` allow using absolute path
 		// error out if no source is set
 		if newMount.Source == "" {
-			return newMount, "", "", errBadVolSrc
+			return newMount, "", "", "", errBadVolSrc
 		}
 		if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}
 	}

 	opts, err := parse.ValidateVolumeOpts(newMount.Options)
 	if err != nil {
-		return newMount, "", "", err
+		return newMount, "", "", "", err
 	}
 	newMount.Options = opts

+	var intermediateMount string
+	if contextDir != "" && newMount.Source != contextDir {
+		rel, err := filepath.Rel(contextDir, newMount.Source)
+		if err != nil {
+			return newMount, "", "", "", fmt.Errorf("computing pathname of bind subdirectory: %w", err)
+		}
+		if rel != "." && rel != "/" {
+			mnt, err := bindFromChroot(contextDir, rel, tmpDir)
+			if err != nil {
+				return newMount, "", "", "", fmt.Errorf("sanitizing bind subdirectory %q: %w", newMount.Source, err)
+			}
+			logrus.Debugf("bind-mounted %q under %q to %q", rel, contextDir, mnt)
+			intermediateMount = mnt
+			newMount.Source = intermediateMount
+		}
+	}
+
 	overlayDir := ""
 	if mountedImage != "" || mountIsReadWrite(newMount) {
 		if newMount, overlayDir, err = convertToOverlay(newMount, store, mountLabel, tmpDir, 0, 0); err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}
 	}

 	succeeded = true
-	return newMount, mountedImage, overlayDir, nil
+	return newMount, mountedImage, intermediateMount, overlayDir, nil
 }

 // GetCacheMount parses a single cache mount entry from the --mount flag.
 //
-// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
-func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) {
+// Returns a Mount to add to the runtime spec's list of mounts, the path of a
+// mounted filesystem if one needs to be unmounted, and an optional lock that
+// needs to be released, or an error.
+//
+// The caller is expected to, after the command which uses the mount exits,
+// unmount and remove the mountpoint of the mounted filesystem (if we provided
+// the path to its mountpoint) and release the lock (if we took one).
+func GetCacheMount(args []string, additionalMountPoints map[string]internal.StageMountDetails, workDir, tmpDir string) (specs.Mount, string, *lockfile.LockFile, error) {
 	var err error
 	var mode uint64
 	var buildahLockFilesDir string
@@ -357,69 +389,75 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
 			sharing = argValue
 		case "bind-propagation":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			switch argValue {
 			default:
-				return newMount, nil, fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
+				return newMount, "", nil, fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
+			case "shared", "rshared", "private", "rprivate", "slave", "rslave":
+				// this should be the relevant parts of the same list of options we accepted above
+			}
+			switch argValue {
+			default:
+				return newMount, "", nil, fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
 			case "shared", "rshared", "private", "rprivate", "slave", "rslave":
 				// this should be the relevant parts of the same list of options we accepted above
 			}
 			newMount.Options = append(newMount.Options, argValue)
 		case "id":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			id = argValue
 		case "from":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			fromStage = argValue
 		case "target", "dst", "destination":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			targetPath := argValue
 			if !path.IsAbs(targetPath) {
 				targetPath = filepath.Join(workDir, targetPath)
 			}
 			if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
-				return newMount, nil, err
+				return newMount, "", nil, err
 			}
 			newMount.Destination = targetPath
 			setDest = true
 		case "src", "source":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			newMount.Source = argValue
 		case "mode":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			mode, err = strconv.ParseUint(argValue, 8, 32)
 			if err != nil {
-				return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err)
+				return newMount, "", nil, fmt.Errorf("unable to parse cache mode: %w", err)
 			}
 		case "uid":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			uid, err = strconv.Atoi(argValue)
 			if err != nil {
-				return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err)
+				return newMount, "", nil, fmt.Errorf("unable to parse cache uid: %w", err)
 			}
 		case "gid":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			gid, err = strconv.Atoi(argValue)
 			if err != nil {
-				return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err)
+				return newMount, "", nil, fmt.Errorf("unable to parse cache gid: %w", err)
 			}
 		default:
-			return newMount, nil, fmt.Errorf("%v: %w", argName, errBadMntOption)
+			return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadMntOption)
 		}
 	}
@@ -430,10 +468,14 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
 	}

 	if !setDest {
-		return newMount, nil, errBadVolDest
+		return newMount, "", nil, errBadVolDest
 	}

+	thisCacheRoot := ""
 	if fromStage != "" {
+		// do not create and use a cache directory on the host,
+		// instead use the location in the mounted stage or
+		// temporary directory as the cache
 		mountPoint := ""
 		if additionalMountPoints != nil {
 			if val, ok := additionalMountPoints[fromStage]; ok {
@@ -445,14 +487,9 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
 		// Cache does not supports using image so if not stage found
 		// return with error
 		if mountPoint == "" {
-			return newMount, nil, fmt.Errorf("no stage or additional build context found with name %s", fromStage)
+			return newMount, "", nil, fmt.Errorf("no stage or additional build context found with name %s", fromStage)
 		}
-		// path should be /mountPoint/specified path
-		evaluated, err := copier.Eval(mountPoint, mountPoint+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
-		if err != nil {
-			return newMount, nil, err
-		}
-		newMount.Source = evaluated
+		thisCacheRoot = mountPoint
 	} else {
 		// we need to create cache on host if no image is being used
@@ -462,64 +499,73 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
 		// cache parent directory: creates separate cache parent for each user.
 		cacheParent := CacheParent()
 		// create cache on host if not present
 		err = os.MkdirAll(cacheParent, os.FileMode(0755))
 		if err != nil {
-			return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err)
+			return newMount, "", nil, fmt.Errorf("unable to create build cache directory: %w", err)
 		}
 		if id != "" {
 			// Don't let the user control where we place the directory.
 			dirID := digest.FromString(id).Encoded()[:16]
-			newMount.Source = filepath.Join(cacheParent, dirID)
+			thisCacheRoot = filepath.Join(cacheParent, dirID)
 			buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, dirID)
 		} else {
 			// Don't let the user control where we place the directory.
 			dirID := digest.FromString(newMount.Destination).Encoded()[:16]
-			newMount.Source = filepath.Join(cacheParent, dirID)
+			thisCacheRoot = filepath.Join(cacheParent, dirID)
 			buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, dirID)
 		}
 		idPair := idtools.IDPair{
 			UID: uid,
 			GID: gid,
 		}
-		// buildkit parity: change uid and gid if specified otheriwise keep `0`
-		err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
+		// buildkit parity: change uid and gid if specified, otherwise keep `0`
+		err = idtools.MkdirAllAndChownNew(thisCacheRoot, os.FileMode(mode), idPair)
 		if err != nil {
-			return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
+			return newMount, "", nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
 		}
 		// create a subdirectory inside `cacheParent` just to store lockfiles
 		buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir)
 		err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700))
 		if err != nil {
-			return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
+			return newMount, "", nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
 		}
 	}

-	var targetLock *lockfile.LockFile // = nil
+	// path should be /mountPoint/specified path
+	evaluated, err := copier.Eval(thisCacheRoot, thisCacheRoot+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
+	if err != nil {
+		return newMount, "", nil, err
+	}
+	newMount.Source = evaluated

 	succeeded := false
-	defer func() {
-		if !succeeded && targetLock != nil {
-			targetLock.Unlock()
-		}
-	}()
+	var targetLock *lockfile.LockFile
 	switch sharing {
 	case "locked":
 		// lock parent cache
 		lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
 		if err != nil {
-			return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
+			return newMount, "", nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
 		}
 		// Will be unlocked after the RUN step is executed.
 		lockfile.Lock()
 		targetLock = lockfile
+		defer func() {
+			if !succeeded {
+				targetLock.Unlock()
+			}
+		}()
 	case "shared":
 		// do nothing since default is `shared`
 		break
 	default:
 		// error out for unknown values
-		return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err)
+		return newMount, "", nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err)
 	}
 	// buildkit parity: default sharing should be shared
@@ -537,12 +583,29 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
 	opts, err := parse.ValidateVolumeOpts(newMount.Options)
 	if err != nil {
-		return newMount, nil, err
+		return newMount, "", nil, err
 	}
 	newMount.Options = opts

+	var intermediateMount string
+	if newMount.Source != thisCacheRoot {
+		rel, err := filepath.Rel(thisCacheRoot, newMount.Source)
+		if err != nil {
+			return newMount, "", nil, fmt.Errorf("computing pathname of cache subdirectory: %w", err)
+		}
+		if rel != "." && rel != "/" {
+			mnt, err := bindFromChroot(thisCacheRoot, rel, tmpDir)
+			if err != nil {
+				return newMount, "", nil, fmt.Errorf("sanitizing cache subdirectory %q: %w", newMount.Source, err)
+			}
+			logrus.Debugf("bind-mounted %q under %q to %q", rel, thisCacheRoot, mnt)
+			intermediateMount = mnt
+			newMount.Source = intermediateMount
+		}
+	}
+
 	succeeded = true
-	return newMount, targetLock, nil
+	return newMount, intermediateMount, targetLock, nil
 }

 func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
@@ -568,27 +631,41 @@ func UnlockLockArray(locks []*lockfile.LockFile) {
 	}
 }

-// GetVolumes gets the volumes from --volume and --mount
+// GetVolumes gets the volumes from --volume and --mount flags.
 //
-// If this function succeeds, the caller must clean up the returned overlay
-// mounts, unmount the mounted images, and unlock the returned
-// *lockfile.LockFile s if any (when??).
-func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string, volumes []string, mounts []string, contextDir, workDir, tmpDir string) ([]specs.Mount, []string, []string, []*lockfile.LockFile, error) {
-	unifiedMounts, mountedImages, overlayDirs, targetLocks, err := getMounts(ctx, store, mountLabel, mounts, contextDir, workDir, tmpDir)
+// Returns a slice of Mounts to add to the runtime spec's list of mounts, the
+// IDs of any images we mounted, a slice of bind-mounted paths, a slice of
+// overlay directories and a slice of locks that we acquired, or an error.
+//
+// The caller is expected to, after the command which uses the mounts and
+// volumes exits, clean up the overlay directories, unmount and remove the
+// mountpoints for the bind-mounted paths, unmount any images we mounted, and
+// release the locks we returned (either using UnlockLockArray() or by
+// iterating over them and unlocking them).
+func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string, volumes []string, mounts []string, contextDir, workDir, tmpDir string) ([]specs.Mount, []string, []string, []string, []*lockfile.LockFile, error) {
+	unifiedMounts, mountedImages, intermediateMounts, overlayMounts, targetLocks, err := getMounts(ctx, store, mountLabel, mounts, contextDir, workDir, tmpDir)
 	if err != nil {
-		return nil, nil, nil, nil, err
+		return nil, nil, nil, nil, nil, err
 	}
 	succeeded := false
 	defer func() {
 		if !succeeded {
-			for _, overlayDir := range overlayDirs {
-				if err := overlay.RemoveTemp(overlayDir); err != nil {
-					logrus.Debugf("unmounting overlay mount at %q: %v", overlayDir, err)
+			for _, overlayMount := range overlayMounts {
+				if err := overlay.RemoveTemp(overlayMount); err != nil {
+					logrus.Debugf("unmounting overlay at %q: %v", overlayMount, err)
 				}
 			}
-			for _, mountedImage := range mountedImages {
-				if _, err := store.UnmountImage(mountedImage, false); err != nil {
-					logrus.Debugf("unmounting image %q: %v", mountedImage, err)
+			for _, intermediateMount := range intermediateMounts {
+				if err := mount.Unmount(intermediateMount); err != nil {
+					logrus.Debugf("unmounting intermediate mount point %q: %v", intermediateMount, err)
+				}
+				if err := os.Remove(intermediateMount); err != nil {
+					logrus.Debugf("removing should-be-empty directory %q: %v", intermediateMount, err)
+				}
+			}
+			for _, image := range mountedImages {
+				if _, err := store.UnmountImage(image, false); err != nil {
+					logrus.Debugf("unmounting image %q: %v", image, err)
 				}
 			}
 			UnlockLockArray(targetLocks)
@@ -596,11 +673,11 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string
 	}()
 	volumeMounts, err := getVolumeMounts(volumes)
 	if err != nil {
-		return nil, nil, nil, nil, err
+		return nil, nil, nil, nil, nil, err
 	}
 	for dest, mount := range volumeMounts {
 		if _, ok := unifiedMounts[dest]; ok {
-			return nil, nil, nil, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
+			return nil, nil, nil, nil, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
 		}
 		unifiedMounts[dest] = mount
 	}
@@ -610,33 +687,51 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string
 		finalMounts = append(finalMounts, mount)
 	}
 	succeeded = true
-	return finalMounts, mountedImages, overlayDirs, targetLocks, nil
+	return finalMounts, mountedImages, intermediateMounts, overlayMounts, targetLocks, nil
 }

-// getMounts takes user-provided input from the --mount flag and creates OCI
-// spec mounts.
-// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
-// buildah run --mount type=tmpfs,target=/dev/shm ...
+// getMounts takes user-provided inputs from the --mount flag and returns a
+// slice of OCI spec mounts, a slice of mounted image IDs, a slice of other
+// mount locations, a slice of overlay mounts, and a slice of locks, or an
+// error.
 //
-// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
-func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string, mounts []string, contextDir, workDir, tmpDir string) (map[string]specs.Mount, []string, []string, []*lockfile.LockFile, error) {
+// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
+// buildah run --mount type=cache,target=/var/cache ...
+// buildah run --mount type=tmpfs,target=/dev/shm ...
+//
+// The caller is expected to, after the command which uses the mounts exits,
+// unmount the overlay filesystems (if we mounted any), unmount the other
+// mounted filesystems and remove their mountpoints (if we provided any paths
+// to mountpoints), unmount any mounted images (if we provided the IDs of any),
+// and then unlock the locks we returned (either using UnlockLockArray() or by
+// iterating over them and unlocking them).
+func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string, mounts []string, contextDir, workDir, tmpDir string) (map[string]specs.Mount, []string, []string, []string, []*lockfile.LockFile, error) {
 	// If `type` is not set default to "bind"
 	mountType := define.TypeBind
 	finalMounts := make(map[string]specs.Mount, len(mounts))
 	mountedImages := make([]string, 0, len(mounts))
-	overlayDirs := make([]string, 0, len(mounts))
+	intermediateMounts := make([]string, 0, len(mounts))
+	overlayMounts := make([]string, 0, len(mounts))
 	targetLocks := make([]*lockfile.LockFile, 0, len(mounts))
 	succeeded := false
 	defer func() {
 		if !succeeded {
-			for _, overlayDir := range overlayDirs {
+			for _, overlayDir := range overlayMounts {
 				if err := overlay.RemoveTemp(overlayDir); err != nil {
 					logrus.Debugf("unmounting overlay mount at %q: %v", overlayDir, err)
 				}
 			}
-			for _, mountedImage := range mountedImages {
-				if _, err := store.UnmountImage(mountedImage, false); err != nil {
-					logrus.Debugf("unmounting image %q: %v", mountedImage, err)
+			for _, intermediateMount := range intermediateMounts {
+				if err := mount.Unmount(intermediateMount); err != nil {
+					logrus.Debugf("unmounting intermediate mount point %q: %v", intermediateMount, err)
+				}
+				if err := os.Remove(intermediateMount); err != nil {
+					logrus.Debugf("removing should-be-empty directory %q: %v", intermediateMount, err)
+				}
+			}
+			for _, image := range mountedImages {
+				if _, err := store.UnmountImage(image, false); err != nil {
+					logrus.Debugf("unmounting image %q: %v", image, err)
 				}
 			}
 			UnlockLockArray(targetLocks)
@@ -651,61 +746,67 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string,
 	for _, mount := range mounts {
 		tokens := strings.Split(mount, ",")
 		if len(tokens) < 2 {
-			return nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+			return nil, nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
 		}
 		for _, field := range tokens {
 			if strings.HasPrefix(field, "type=") {
 				kv := strings.Split(field, "=")
 				if len(kv) != 2 {
-					return nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+					return nil, nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
 				}
 				mountType = kv[1]
 			}
 		}
 		switch mountType {
 		case define.TypeBind:
-			mount, image, overlayDir, err := GetBindMount(ctx, tokens, contextDir, store, mountLabel, nil, workDir, tmpDir)
+			mount, image, intermediateMount, overlayMount, err := GetBindMount(ctx, tokens, contextDir, store, mountLabel, nil, workDir, tmpDir)
 			if err != nil {
-				return nil, nil, nil, nil, err
+				return nil, nil, nil, nil, nil, err
 			}
 			if image != "" {
 				mountedImages = append(mountedImages, image)
 			}
-			if overlayDir != "" {
-				overlayDirs = append(overlayDirs, overlayDir)
+			if intermediateMount != "" {
+				intermediateMounts = append(intermediateMounts, intermediateMount)
+			}
+			if overlayMount != "" {
+				overlayMounts = append(overlayMounts, overlayMount)
 			}
 			if _, ok := finalMounts[mount.Destination]; ok {
-				return nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+				return nil, nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
 			}
 			finalMounts[mount.Destination] = mount
 		case TypeCache:
-			mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir)
+			mount, intermediateMount, tl, err := GetCacheMount(tokens, nil, workDir, tmpDir)
 			if err != nil {
-				return nil, nil, nil, nil, err
+				return nil, nil, nil, nil, nil, err
 			}
+			if intermediateMount != "" {
+				intermediateMounts = append(intermediateMounts, intermediateMount)
+			}
 			if tl != nil {
 				targetLocks = append(targetLocks, tl)
 			}
 			if _, ok := finalMounts[mount.Destination]; ok {
-				return nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+				return nil, nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
 			}
 			finalMounts[mount.Destination] = mount
 		case TypeTmpfs:
 			mount, err := GetTmpfsMount(tokens)
 			if err != nil {
-				return nil, nil, nil, nil, err
+				return nil, nil, nil, nil, nil, err
 			}
 			if _, ok := finalMounts[mount.Destination]; ok {
-				return nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+				return nil, nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
 			}
 			finalMounts[mount.Destination] = mount
 		default:
-			return nil, nil, nil, nil, fmt.Errorf("invalid filesystem type %q", mountType)
+			return nil, nil, nil, nil, nil, fmt.Errorf("invalid filesystem type %q", mountType)
 		}
 	}
 	succeeded = true
-	return finalMounts, mountedImages, overlayDirs, targetLocks, nil
+	return finalMounts, mountedImages, intermediateMounts, overlayMounts, targetLocks, nil
 }

 // GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
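With the signature changes above, callers of GetVolumes() now get back an extra slice of intermediate mountpoints that has to be torn down alongside the overlays, mounted images, and locks. Here is a hedged sketch of the caller side, using the return values and cleanup calls that appear in the diff; the wrapper function, its signature, and the internal import path for the volumes package are illustrative assumptions, not code from the commit:

import (
	"os"

	"github.com/containers/buildah/internal/volumes"
	"github.com/containers/buildah/pkg/overlay"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage"
	"github.com/containers/storage/pkg/mount"
	"github.com/sirupsen/logrus"
)

// runWithVolumes is illustrative only: it shows the cleanup order implied by
// the new GetVolumes() return values.
func runWithVolumes(sys *types.SystemContext, store storage.Store, mountLabel string, volumeSpecs, mountSpecs []string, contextDir, workDir, tmpDir string) error {
	mounts, mountedImages, intermediateMounts, overlayDirs, targetLocks, err := volumes.GetVolumes(sys, store, mountLabel, volumeSpecs, mountSpecs, contextDir, workDir, tmpDir)
	if err != nil {
		return err
	}
	defer func() {
		// Overlays first, then the pinned intermediate bind mounts, then
		// mounted images, and finally any cache locks.
		for _, overlayDir := range overlayDirs {
			if err := overlay.RemoveTemp(overlayDir); err != nil {
				logrus.Debugf("removing overlay %q: %v", overlayDir, err)
			}
		}
		for _, intermediateMount := range intermediateMounts {
			if err := mount.Unmount(intermediateMount); err != nil {
				logrus.Debugf("unmounting %q: %v", intermediateMount, err)
			}
			// The mountpoint should be empty once it is unmounted.
			if err := os.Remove(intermediateMount); err != nil {
				logrus.Debugf("removing %q: %v", intermediateMount, err)
			}
		}
		for _, image := range mountedImages {
			if _, err := store.UnmountImage(image, false); err != nil {
				logrus.Debugf("unmounting image %q: %v", image, err)
			}
		}
		volumes.UnlockLockArray(targetLocks)
	}()
	_ = mounts // append these to the runtime spec and run the command here
	return nil
}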

run.go

@@ -184,6 +184,8 @@ type runMountArtifacts struct {
 	SSHAuthSock string
 	// Lock files, which should have their Unlock() methods called
 	TargetLocks []*lockfile.LockFile
+	// Intermediate mount points, which should be Unmount()ed and Removed()d
+	IntermediateMounts []string
 }

 // RunMountInfo are the available run mounts for this run


@@ -42,6 +42,7 @@ import (
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/reexec"
 	"github.com/containers/storage/pkg/unshare"
 	"github.com/opencontainers/go-digest"
@@ -1519,6 +1520,7 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []stri
 	mountTargets := make([]string, 0, len(mounts))
 	tmpFiles := make([]string, 0, len(mounts))
 	mountImages := make([]string, 0, len(mounts))
+	intermediateMounts := make([]string, 0, len(mounts))
 	finalMounts := make([]specs.Mount, 0, len(mounts))
 	agents := make([]*sshagent.AgentServer, 0, len(mounts))
 	defaultSSHSock := ""
@@ -1538,6 +1540,14 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []stri
 				b.Logger.Error(err.Error())
 			}
 		}
+		for _, intermediateMount := range intermediateMounts {
+			if err := mount.Unmount(intermediateMount); err != nil {
+				b.Logger.Errorf("unmounting %q: %v", intermediateMount, err)
+			}
+			if err := os.Remove(intermediateMount); err != nil {
+				b.Logger.Errorf("removing should-be-empty directory %q: %v", intermediateMount, err)
+			}
+		}
 		for _, mountImage := range mountImages {
 			if _, err := b.store.UnmountImage(mountImage, false); err != nil {
 				b.Logger.Error(err.Error())
@@ -1554,9 +1564,10 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []stri
 	for _, mount := range mounts {
 		var mountSpec *specs.Mount
 		var err error
-		var envFile, image, bundleMountsDir, overlayDir string
+		var envFile, image, bundleMountsDir, overlayDir, intermediateMount string
 		var agent *sshagent.AgentServer
 		var tl *lockfile.LockFile
 		tokens := strings.Split(mount, ",")
 		// If `type` is not set default to TypeBind
@@ -1601,7 +1612,7 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []stri
 					return nil, nil, err
 				}
 			}
-			mountSpec, image, overlayDir, err = b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir, bundleMountsDir)
+			mountSpec, image, intermediateMount, overlayDir, err = b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir, bundleMountsDir)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -1611,6 +1622,9 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []stri
 			if overlayDir != "" {
 				overlayDirs = append(overlayDirs, overlayDir)
 			}
+			if intermediateMount != "" {
+				intermediateMounts = append(intermediateMounts, intermediateMount)
+			}
 			finalMounts = append(finalMounts, *mountSpec)
 		case "tmpfs":
 			mountSpec, err = b.getTmpfsMount(tokens, idMaps)
@@ -1619,10 +1633,18 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []stri
 			}
 			finalMounts = append(finalMounts, *mountSpec)
 		case "cache":
-			mountSpec, tl, err = b.getCacheMount(tokens, sources.StageMountPoints, idMaps, sources.WorkDir)
+			if bundleMountsDir == "" {
+				if bundleMountsDir, err = os.MkdirTemp(bundlePath, "mounts"); err != nil {
+					return nil, nil, err
+				}
+			}
+			mountSpec, intermediateMount, tl, err = b.getCacheMount(tokens, sources.StageMountPoints, idMaps, sources.WorkDir, bundleMountsDir)
 			if err != nil {
 				return nil, nil, err
 			}
+			if intermediateMount != "" {
+				intermediateMounts = append(intermediateMounts, intermediateMount)
+			}
 			if tl != nil {
 				targetLocks = append(targetLocks, tl)
 			}
@@ -1646,32 +1668,33 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []stri
 	}
 	succeeded = true
 	artifacts := &runMountArtifacts{
 		RunMountTargets:    mountTargets,
 		RunOverlayDirs:     overlayDirs,
 		TmpFiles:           tmpFiles,
 		Agents:             agents,
 		MountedImages:      mountImages,
 		SSHAuthSock:        defaultSSHSock,
 		TargetLocks:        targetLocks,
+		IntermediateMounts: intermediateMounts,
 	}
 	return finalMounts, artifacts, nil
 }

-func (b *Builder) getBindMount(tokens []string, sys *types.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir, tmpDir string) (*specs.Mount, string, string, error) {
+func (b *Builder) getBindMount(tokens []string, sys *types.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir, tmpDir string) (*specs.Mount, string, string, string, error) {
 	if contextDir == "" {
-		return nil, "", "", errors.New("context directory for current run invocation is not configured")
+		return nil, "", "", "", errors.New("context directory for current run invocation is not configured")
 	}
 	var optionMounts []specs.Mount
-	mount, image, overlayDir, err := volumes.GetBindMount(sys, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir, tmpDir)
+	mount, image, intermediateMount, overlayMount, err := volumes.GetBindMount(sys, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir, tmpDir)
 	if err != nil {
-		return nil, image, overlayDir, err
+		return nil, "", "", "", err
 	}
 	optionMounts = append(optionMounts, mount)
 	volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
 	if err != nil {
-		return nil, image, overlayDir, err
+		return nil, "", "", "", err
 	}
-	return &volumes[0], image, overlayDir, nil
+	return &volumes[0], image, intermediateMount, overlayMount, nil
 }

 func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*specs.Mount, error) {
@@ -1945,9 +1968,9 @@ func (b *Builder) cleanupTempVolumes() {
 // cleanupRunMounts cleans up run mounts so they only appear in this run.
 func (b *Builder) cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error {
 	for _, agent := range artifacts.Agents {
-		err := agent.Shutdown()
-		if err != nil {
-			return err
+		servePath := agent.ServePath()
+		if err := agent.Shutdown(); err != nil {
+			return fmt.Errorf("shutting down SSH agent at %q: %v", servePath, err)
 		}
 	}
 	// clean up any overlays we mounted
@@ -1956,6 +1979,15 @@ func (b *Builder) cleanupRunMounts(mountpoint string, artifacts *runMountArtifac
 			return err
 		}
 	}
+	// unmount anything that needs unmounting
+	for _, intermediateMount := range artifacts.IntermediateMounts {
+		if err := mount.Unmount(intermediateMount); err != nil && !errors.Is(err, os.ErrNotExist) {
+			return fmt.Errorf("unmounting %q: %w", intermediateMount, err)
+		}
+		if err := os.Remove(intermediateMount); err != nil && !errors.Is(err, os.ErrNotExist) {
+			return fmt.Errorf("removing should-be-empty directory %q: %w", intermediateMount, err)
+		}
+	}
 	// unmount any images we mounted for this run
 	for _, image := range artifacts.MountedImages {
 		if _, err := b.store.UnmountImage(image, false); err != nil {


@@ -351,9 +351,12 @@ func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Moun
 	return spec.Mounts, nil
 }

-// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
-func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*spec.Mount, *lockfile.LockFile, error) {
-	return nil, nil, errors.New("cache mounts not supported on freebsd")
+// If this succeeded, the caller would be expected to, after the command which
+// uses the mount exits, unmount the mounted filesystem and remove its
+// mountpoint (if we provided the path to its mountpoint), and release the lock
+// (if we took one).
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir, tmpDir string) (*specs.Mount, string, *lockfile.LockFile, error) {
+	return nil, "", nil, errors.New("cache mounts not supported on freebsd")
 }

 func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) {


@@ -37,6 +37,7 @@ import (
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/unshare"
 	"github.com/docker/go-units"
@@ -1273,24 +1274,39 @@ func checkIdsGreaterThan5(ids []specs.LinuxIDMapping) bool {
 	return false
 }

-// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
-func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*specs.Mount, *lockfile.LockFile, error) {
+// Returns a Mount to add to the runtime spec's list of mounts, an optional
+// path of a mounted filesystem, unmounted, and an optional lock, or an error.
+//
+// The caller is expected to, after the command which uses the mount exits,
+// unmount the mounted filesystem (if we provided the path to its mountpoint)
+// and remove its mountpoint, , and release the lock (if we took one).
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir, tmpDir string) (*specs.Mount, string, *lockfile.LockFile, error) {
 	var optionMounts []specs.Mount
-	mount, targetLock, err := volumes.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir)
+	optionMount, intermediateMount, targetLock, err := volumes.GetCacheMount(tokens, stageMountPoints, workDir, tmpDir)
 	if err != nil {
-		return nil, nil, err
+		return nil, "", nil, err
 	}
 	succeeded := false
 	defer func() {
-		if !succeeded && targetLock != nil {
-			targetLock.Unlock()
+		if !succeeded {
+			if intermediateMount != "" {
+				if err := mount.Unmount(intermediateMount); err != nil {
+					b.Logger.Debugf("unmounting %q: %v", intermediateMount, err)
+				}
+				if err := os.Remove(intermediateMount); err != nil {
+					b.Logger.Debugf("removing should-be-empty directory %q: %v", intermediateMount, err)
+				}
+			}
+			if targetLock != nil {
+				targetLock.Unlock()
+			}
 		}
 	}()
-	optionMounts = append(optionMounts, mount)
+	optionMounts = append(optionMounts, optionMount)
 	volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
 	if err != nil {
-		return nil, nil, err
+		return nil, "", nil, err
 	}
 	succeeded = true
-	return &volumes[0], targetLock, nil
+	return &volumes[0], intermediateMount, targetLock, nil
 }


@@ -6707,8 +6707,8 @@ RUN --mount=type=cache,source=../../../../../../../../../../../$TEST_SCRATCH_DIR
 ls -l /var/tmp && cat /var/tmp/file.txt
 EOF
-  run_buildah 1 build --no-cache ${TEST_SCRATCH_DIR}
-  expect_output --substring "cat: can't open '/var/tmp/file.txt': No such file or directory"
+  run_buildah 125 build --no-cache ${TEST_SCRATCH_DIR}
+  expect_output --substring "no such file or directory"

   mkdir ${TEST_SCRATCH_DIR}/cve20249675
   cat > ${TEST_SCRATCH_DIR}/cve20249675/Containerfile <<EOF


@@ -1,2 +1,3 @@
 FROM scratch
-COPY hello .
+COPY hello hello1 .
+COPY hello2 /subdir/hello


@@ -0,0 +1 @@
+hello1


@@ -460,8 +460,10 @@ function configure_and_check_user() {
   _prefetch alpine
   run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
   cid=$output
-  run_buildah run --mount type=bind,source=.,from=buildkitbase,target=/test${zflag} $cid cat /test/hello
-  expect_output --substring "hello"
+  run_buildah run --mount type=bind,source=.,from=buildkitbase,target=/test${zflag} $cid cat /test/hello1
+  expect_output --substring "hello1"
+  run_buildah run --mount type=bind,source=subdir,from=buildkitbase,target=/test${zflag} $cid cat /test/hello
+  expect_output --substring "hello2"
 }

 @test "run --mount=type=cache like buildkit" {
@@ -475,8 +477,8 @@ function configure_and_check_user() {
   _prefetch alpine
   run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
   cid=$output
-  run_buildah run --mount type=cache,target=/test${zflag} $cid sh -c 'echo "hello" > /test/hello && cat /test/hello'
-  run_buildah run --mount type=cache,target=/test${zflag} $cid cat /test/hello
+  run_buildah run --mount type=cache,target=/test${zflag} $cid sh -c 'mkdir -p /test/subdir && echo "hello" > /test/subdir/h.txt && cat /test/subdir/h.txt'
+  run_buildah run --mount type=cache,src=subdir,target=/test${zflag} $cid cat /test/h.txt
   expect_output --substring "hello"
 }