Fix TOCTOU error when bind and cache mounts use "src" values

Fix a time-of-check/time-of-use error when mounting type=bind and
type=cache directories that use a "src" flag: a hostile writer could use
a concurrently-running stage or build to replace that "src" location
between the point when we resolved possible symbolic links and the point
when runc/crun/whatever actually went to create the bind mount
(CVE-2024-11218). Also stop ignoring the "src" option for cache mounts
when there is no "from" option.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
parent 2c05dfd1c7
commit 8b46071ab5
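The bug class here is a classic time-of-check/time-of-use race. A minimal sketch of the two-step pattern this commit removes, with hypothetical paths and helper names (an illustration of the race, not buildah's actual code):

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    // resolveThenMount illustrates the time-of-check/time-of-use gap: the
    // "src" path is resolved once, but the kernel walks the path again when
    // the bind mount is created, so whatever the path points to *at mount
    // time* is what actually gets mounted.
    func resolveThenMount(contextDir, src string) error {
    	// Time of check: resolve any symlinks in the user-supplied "src".
    	resolved, err := filepath.EvalSymlinks(filepath.Join(contextDir, src))
    	if err != nil {
    		return err
    	}
    	// ...window: a concurrently-running stage can now replace a
    	// component of "resolved" with a symlink to anywhere on the host...

    	// Time of use: the runtime (runc/crun) bind-mounts the path as it
    	// exists now, not as it existed when we checked it.
    	fmt.Println("runtime would bind-mount:", resolved)
    	return nil
    }

    func main() {
    	_ = resolveThenMount("/var/tmp/context", "subdir")
    }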
@@ -13,6 +13,7 @@ import (
 	"github.com/containers/buildah/pkg/overlay"
 	"github.com/containers/buildah/pkg/parse"
 	"github.com/containers/buildah/util"
+	"github.com/containers/storage/pkg/mount"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
@@ -184,7 +185,7 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
 	if err != nil {
 		return fmt.Errorf("building system context: %w", err)
 	}
-	mounts, mountedImages, _, targetLocks, err := volumes.GetVolumes(systemContext, store, builder.MountLabel, iopts.volumes, iopts.mounts, iopts.contextDir, iopts.workingDir, tmpDir)
+	mounts, mountedImages, intermediateMounts, _, targetLocks, err := volumes.GetVolumes(systemContext, store, builder.MountLabel, iopts.volumes, iopts.mounts, iopts.contextDir, iopts.workingDir, tmpDir)
 	if err != nil {
 		return err
 	}
@@ -192,6 +193,14 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
 		if err := overlay.CleanupContent(tmpDir); err != nil {
 			logrus.Debugf("unmounting overlay mounts under %q: %v", tmpDir, err)
 		}
+		for _, intermediateMount := range intermediateMounts {
+			if err := mount.Unmount(intermediateMount); err != nil {
+				logrus.Debugf("unmounting mount %q: %v", intermediateMount, err)
+			}
+			if err := os.Remove(intermediateMount); err != nil {
+				logrus.Debugf("removing should-be-empty mount directory %q: %v", intermediateMount, err)
+			}
+		}
 		for _, mountedImage := range mountedImages {
 			if _, err := store.UnmountImage(mountedImage, false); err != nil {
 				logrus.Debugf("unmounting image %q: %v", mountedImage, err)
@@ -23,6 +23,7 @@ import (
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/unshare"
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -117,10 +118,19 @@ func convertToOverlay(m specs.Mount, store storage.Store, mountLabel, tmpDir string, …
 }

 // FIXME: this code needs to be merged with pkg/parse/parse.go ValidateVolumeOpts
 //
 // GetBindMount parses a single bind mount entry from the --mount flag.
-// Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty.
-// Caller is expected to perform unmount of any mounted images
-func GetBindMount(sys *types.SystemContext, args []string, contextDir string, store storage.Store, mountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir, tmpDir string) (specs.Mount, string, string, error) {
+//
+// Returns a Mount to add to the runtime spec's list of mounts, the ID of the
+// image we mounted if we mounted one, the path of a mounted location if one
+// needs to be unmounted and removed, and the path of an overlay mount if one
+// needs to be cleaned up, or an error.
+//
+// The caller is expected to, after the command which uses the mount exits,
+// clean up the overlay filesystem (if we provided a path to it), unmount and
+// remove the mountpoint for the mounted filesystem (if we provided the path to
+// its mountpoint), and then unmount the image (if we mounted one).
+func GetBindMount(sys *types.SystemContext, args []string, contextDir string, store storage.Store, mountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir, tmpDir string) (specs.Mount, string, string, string, error) {
 	newMount := specs.Mount{
 		Type: define.TypeBind,
 	}
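The rewritten doc comment above spells out a four-value cleanup contract for callers. A hedged sketch of what honoring it looks like, in the order the comment gives; the helper function is hypothetical and not part of this diff, though the packages it uses are the ones imported above:

    import (
    	"os"

    	"github.com/containers/buildah/pkg/overlay"
    	"github.com/containers/storage"
    	"github.com/containers/storage/pkg/mount"
    	"github.com/sirupsen/logrus"
    )

    // cleanupBindMount mirrors the contract described above: clean up the
    // overlay, then unmount and remove the intermediate mountpoint, then
    // unmount the image. Errors are only logged in this sketch.
    func cleanupBindMount(store storage.Store, imageID, intermediateMount, overlayDir string) {
    	if overlayDir != "" {
    		if err := overlay.RemoveTemp(overlayDir); err != nil {
    			logrus.Debugf("removing overlay %q: %v", overlayDir, err)
    		}
    	}
    	if intermediateMount != "" {
    		if err := mount.Unmount(intermediateMount); err != nil {
    			logrus.Debugf("unmounting %q: %v", intermediateMount, err)
    		}
    		if err := os.Remove(intermediateMount); err != nil {
    			logrus.Debugf("removing %q: %v", intermediateMount, err)
    		}
    	}
    	if imageID != "" {
    		if _, err := store.UnmountImage(imageID, false); err != nil {
    			logrus.Debugf("unmounting image %q: %v", imageID, err)
    		}
    	}
    }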
@@ -152,39 +162,39 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, …
 			mountReadability = "ro"
 		case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U", "no-dereference":
 			if hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", val, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", val, errBadOptionArg)
 			}
 			newMount.Options = append(newMount.Options, argName)
 		case "from":
 			if !hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			fromImage = argValue
 		case "bind-propagation":
 			if !hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			switch argValue {
 			default:
-				return newMount, "", "", fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
+				return newMount, "", "", "", fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
 			case "shared", "rshared", "private", "rprivate", "slave", "rslave":
 				// this should be the relevant parts of the same list of options we accepted above
 			}
 			newMount.Options = append(newMount.Options, argValue)
 		case "src", "source":
 			if !hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			newMount.Source = argValue
 		case "target", "dst", "destination":
 			if !hasArgValue {
-				return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			targetPath := argValue
 			setDest = targetPath
@@ -192,12 +202,12 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, …
 				targetPath = filepath.Join(workDir, targetPath)
 			}
 			if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
-				return newMount, "", "", err
+				return newMount, "", "", "", err
 			}
 			newMount.Destination = targetPath
 		case "relabel":
 			if setRelabel != "" {
-				return newMount, "", "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg)
+				return newMount, "", "", "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg)
 			}
 			setRelabel = argValue
 			switch argValue {
@@ -206,14 +216,14 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, …
 			case "shared":
 				newMount.Options = append(newMount.Options, "z")
 			default:
-				return newMount, "", "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", argName, errBadMntOption)
+				return newMount, "", "", "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", argName, errBadMntOption)
 			}
 		case "consistency":
 			// Option for OS X only, has no meaning on other platforms
 			// and can thus be safely ignored.
 			// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
 		default:
-			return newMount, "", "", fmt.Errorf("%v: %w", argName, errBadMntOption)
+			return newMount, "", "", "", fmt.Errorf("%v: %w", argName, errBadMntOption)
 		}
 	}

@@ -237,12 +247,12 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, …
 	if mountPoint == "" {
 		image, err := internalUtil.LookupImage(sys, store, fromImage)
 		if err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}

 		mountPoint, err = image.Mount(context.Background(), nil, mountLabel)
 		if err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}
 		mountedImage = image.ID()
 		defer func() {
@@ -263,7 +273,7 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, …
 	}

 	if setDest == "" {
-		return newMount, "", "", errBadVolDest
+		return newMount, "", "", "", errBadVolDest
 	}

 	// buildkit parity: support absolute path for sources from current build context
@@ -271,42 +281,64 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, …
 		// path should be /contextDir/specified path
 		evaluated, err := copier.Eval(contextDir, contextDir+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
 		if err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}
 		newMount.Source = evaluated
 	} else {
 		// looks like its coming from `build run --mount=type=bind` allow using absolute path
 		// error out if no source is set
 		if newMount.Source == "" {
-			return newMount, "", "", errBadVolSrc
+			return newMount, "", "", "", errBadVolSrc
 		}
 		if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}
 	}

 	opts, err := parse.ValidateVolumeOpts(newMount.Options)
 	if err != nil {
-		return newMount, "", "", err
+		return newMount, "", "", "", err
 	}
 	newMount.Options = opts

+	var intermediateMount string
+	if contextDir != "" && newMount.Source != contextDir {
+		rel, err := filepath.Rel(contextDir, newMount.Source)
+		if err != nil {
+			return newMount, "", "", "", fmt.Errorf("computing pathname of bind subdirectory: %w", err)
+		}
+		if rel != "." && rel != "/" {
+			mnt, err := bindFromChroot(contextDir, rel, tmpDir)
+			if err != nil {
+				return newMount, "", "", "", fmt.Errorf("sanitizing bind subdirectory %q: %w", newMount.Source, err)
+			}
+			logrus.Debugf("bind-mounted %q under %q to %q", rel, contextDir, mnt)
+			intermediateMount = mnt
+			newMount.Source = intermediateMount
+		}
+	}
+
 	overlayDir := ""
 	if mountedImage != "" || mountIsReadWrite(newMount) {
 		if newMount, overlayDir, err = convertToOverlay(newMount, store, mountLabel, tmpDir, 0, 0); err != nil {
-			return newMount, "", "", err
+			return newMount, "", "", "", err
 		}
 	}

 	succeeded = true
-	return newMount, mountedImage, overlayDir, nil
+	return newMount, mountedImage, intermediateMount, overlayDir, nil
 }

 // GetCacheMount parses a single cache mount entry from the --mount flag.
 //
-// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
-func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) {
+// Returns a Mount to add to the runtime spec's list of mounts, the path of a
+// mounted filesystem if one needs to be unmounted, and an optional lock that
+// needs to be released, or an error.
+//
+// The caller is expected to, after the command which uses the mount exits,
+// unmount and remove the mountpoint of the mounted filesystem (if we provided
+// the path to its mountpoint) and release the lock (if we took one).
+func GetCacheMount(args []string, additionalMountPoints map[string]internal.StageMountDetails, workDir, tmpDir string) (specs.Mount, string, *lockfile.LockFile, error) {
 	var err error
 	var mode uint64
 	var buildahLockFilesDir string
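The heart of the fix is the new intermediateMount logic above: once the "src" subdirectory has been resolved, it is bind-mounted onto a fresh, private mountpoint under tmpDir, and the runtime is pointed at that pinned mount instead of the original, swappable path. The bindFromChroot helper it calls is not shown in this diff; the sketch below shows only the pinning half of the idea, with hypothetical names (buildah's real helper resolves the subdirectory while chrooted into the parent, so symlinks and ".." cannot escape it):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"

    	"github.com/containers/storage/pkg/mount"
    )

    // pinSubdirectory is a hypothetical stand-in for bindFromChroot. It bind-
    // mounts the already-resolved subdirectory onto a private mountpoint;
    // after that, swapping the original path for a symlink no longer changes
    // what the container runtime will mount.
    func pinSubdirectory(parent, rel, tmpDir string) (string, error) {
    	mnt, err := os.MkdirTemp(tmpDir, "buildah-intermediate-")
    	if err != nil {
    		return "", err
    	}
    	// NOTE: joining parent+rel here is exactly the step the real helper
    	// performs from inside a chroot, to keep the walk confined to parent.
    	if err := mount.Mount(filepath.Join(parent, rel), mnt, "bind", "bind"); err != nil {
    		return "", err
    	}
    	return mnt, nil
    }

    func main() {
    	mnt, err := pinSubdirectory("/var/tmp/context", "subdir", os.TempDir())
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	fmt.Println("pinned at:", mnt)
    }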
@@ -357,69 +389,75 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, …
 			sharing = argValue
 		case "bind-propagation":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			switch argValue {
 			default:
-				return newMount, nil, fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
+				return newMount, "", nil, fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
 			case "shared", "rshared", "private", "rprivate", "slave", "rslave":
 				// this should be the relevant parts of the same list of options we accepted above
 			}
 			newMount.Options = append(newMount.Options, argValue)
 		case "id":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			id = argValue
 		case "from":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			fromStage = argValue
 		case "target", "dst", "destination":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			targetPath := argValue
 			if !path.IsAbs(targetPath) {
 				targetPath = filepath.Join(workDir, targetPath)
 			}
 			if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
-				return newMount, nil, err
+				return newMount, "", nil, err
 			}
 			newMount.Destination = targetPath
 			setDest = true
 		case "src", "source":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			newMount.Source = argValue
 		case "mode":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			mode, err = strconv.ParseUint(argValue, 8, 32)
 			if err != nil {
-				return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err)
+				return newMount, "", nil, fmt.Errorf("unable to parse cache mode: %w", err)
 			}
 		case "uid":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			uid, err = strconv.Atoi(argValue)
 			if err != nil {
-				return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err)
+				return newMount, "", nil, fmt.Errorf("unable to parse cache uid: %w", err)
 			}
 		case "gid":
 			if !hasArgValue {
-				return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
+				return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
 			}
 			gid, err = strconv.Atoi(argValue)
 			if err != nil {
-				return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err)
+				return newMount, "", nil, fmt.Errorf("unable to parse cache gid: %w", err)
 			}
 		default:
-			return newMount, nil, fmt.Errorf("%v: %w", argName, errBadMntOption)
+			return newMount, "", nil, fmt.Errorf("%v: %w", argName, errBadMntOption)
 		}
 	}

@@ -430,10 +468,14 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, …
 	}

 	if !setDest {
-		return newMount, nil, errBadVolDest
+		return newMount, "", nil, errBadVolDest
 	}

+	thisCacheRoot := ""
 	if fromStage != "" {
+		// do not create and use a cache directory on the host,
+		// instead use the location in the mounted stage or
+		// temporary directory as the cache
 		mountPoint := ""
 		if additionalMountPoints != nil {
 			if val, ok := additionalMountPoints[fromStage]; ok {
@@ -445,14 +487,9 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, …
 		// Cache does not supports using image so if not stage found
 		// return with error
 		if mountPoint == "" {
-			return newMount, nil, fmt.Errorf("no stage or additional build context found with name %s", fromStage)
+			return newMount, "", nil, fmt.Errorf("no stage or additional build context found with name %s", fromStage)
 		}
-		// path should be /mountPoint/specified path
-		evaluated, err := copier.Eval(mountPoint, mountPoint+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
-		if err != nil {
-			return newMount, nil, err
-		}
-		newMount.Source = evaluated
+		thisCacheRoot = mountPoint
 	} else {
 		// we need to create cache on host if no image is being used

@@ -462,64 +499,73 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, …

 		// cache parent directory: creates separate cache parent for each user.
 		cacheParent := CacheParent()

 		// create cache on host if not present
 		err = os.MkdirAll(cacheParent, os.FileMode(0755))
 		if err != nil {
-			return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err)
+			return newMount, "", nil, fmt.Errorf("unable to create build cache directory: %w", err)
 		}

 		if id != "" {
 			// Don't let the user control where we place the directory.
 			dirID := digest.FromString(id).Encoded()[:16]
-			newMount.Source = filepath.Join(cacheParent, dirID)
+			thisCacheRoot = filepath.Join(cacheParent, dirID)
 			buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, dirID)
 		} else {
 			// Don't let the user control where we place the directory.
 			dirID := digest.FromString(newMount.Destination).Encoded()[:16]
-			newMount.Source = filepath.Join(cacheParent, dirID)
+			thisCacheRoot = filepath.Join(cacheParent, dirID)
 			buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, dirID)
 		}

 		idPair := idtools.IDPair{
 			UID: uid,
 			GID: gid,
 		}
-		// buildkit parity: change uid and gid if specified otheriwise keep `0`
-		err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
+		// buildkit parity: change uid and gid if specified, otherwise keep `0`
+		err = idtools.MkdirAllAndChownNew(thisCacheRoot, os.FileMode(mode), idPair)
 		if err != nil {
-			return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
+			return newMount, "", nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
 		}

 		// create a subdirectory inside `cacheParent` just to store lockfiles
 		buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir)
 		err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700))
 		if err != nil {
-			return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
+			return newMount, "", nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
 		}
 	}

-	var targetLock *lockfile.LockFile // = nil
+	// path should be /mountPoint/specified path
+	evaluated, err := copier.Eval(thisCacheRoot, thisCacheRoot+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
+	if err != nil {
+		return newMount, "", nil, err
+	}
+	newMount.Source = evaluated
+
+	var targetLock *lockfile.LockFile
 	succeeded := false
 	defer func() {
 		if !succeeded && targetLock != nil {
 			targetLock.Unlock()
 		}
 	}()
 	switch sharing {
 	case "locked":
 		// lock parent cache
 		lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
 		if err != nil {
-			return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
+			return newMount, "", nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
 		}
 		// Will be unlocked after the RUN step is executed.
 		lockfile.Lock()
 		targetLock = lockfile
-		defer func() {
-			if !succeeded {
-				targetLock.Unlock()
-			}
-		}()
 	case "shared":
 		// do nothing since default is `shared`
 		break
 	default:
 		// error out for unknown values
-		return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err)
+		return newMount, "", nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err)
 	}

 	// buildkit parity: default sharing should be shared
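As context for the hunk above: the pre-existing naming scheme hashes the user-supplied cache id (or, failing that, the mount destination) and uses the first 16 hex characters of the digest as the directory name under the cache parent, so user input never becomes a path component. A small standalone illustration of that derivation:

    package main

    import (
    	"fmt"

    	digest "github.com/opencontainers/go-digest"
    )

    func main() {
    	// Same derivation as the code above: hash the id, keep the first
    	// 16 hex characters, and use that as the directory name.
    	dirID := digest.FromString("my-cache-id").Encoded()[:16]
    	fmt.Println(dirID) // a stable 16-character hex string
    }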
@@ -537,12 +583,29 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, …

 	opts, err := parse.ValidateVolumeOpts(newMount.Options)
 	if err != nil {
-		return newMount, nil, err
+		return newMount, "", nil, err
 	}
 	newMount.Options = opts

+	var intermediateMount string
+	if newMount.Source != thisCacheRoot {
+		rel, err := filepath.Rel(thisCacheRoot, newMount.Source)
+		if err != nil {
+			return newMount, "", nil, fmt.Errorf("computing pathname of cache subdirectory: %w", err)
+		}
+		if rel != "." && rel != "/" {
+			mnt, err := bindFromChroot(thisCacheRoot, rel, tmpDir)
+			if err != nil {
+				return newMount, "", nil, fmt.Errorf("sanitizing cache subdirectory %q: %w", newMount.Source, err)
+			}
+			logrus.Debugf("bind-mounted %q under %q to %q", rel, thisCacheRoot, mnt)
+			intermediateMount = mnt
+			newMount.Source = intermediateMount
+		}
+	}
+
 	succeeded = true
-	return newMount, targetLock, nil
+	return newMount, intermediateMount, targetLock, nil
 }

 func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
@@ -568,27 +631,41 @@ func UnlockLockArray(locks []*lockfile.LockFile) {
 	}
 }

-// GetVolumes gets the volumes from --volume and --mount
+// GetVolumes gets the volumes from --volume and --mount flags.
 //
-// If this function succeeds, the caller must clean up the returned overlay
-// mounts, unmount the mounted images, and unlock the returned
-// *lockfile.LockFile s if any (when??).
-func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string, volumes []string, mounts []string, contextDir, workDir, tmpDir string) ([]specs.Mount, []string, []string, []*lockfile.LockFile, error) {
-	unifiedMounts, mountedImages, overlayDirs, targetLocks, err := getMounts(ctx, store, mountLabel, mounts, contextDir, workDir, tmpDir)
+// Returns a slice of Mounts to add to the runtime spec's list of mounts, the
+// IDs of any images we mounted, a slice of bind-mounted paths, a slice of
+// overlay directories and a slice of locks that we acquired, or an error.
+//
+// The caller is expected to, after the command which uses the mounts and
+// volumes exits, clean up the overlay directories, unmount and remove the
+// mountpoints for the bind-mounted paths, unmount any images we mounted, and
+// release the locks we returned (either using UnlockLockArray() or by
+// iterating over them and unlocking them).
+func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string, volumes []string, mounts []string, contextDir, workDir, tmpDir string) ([]specs.Mount, []string, []string, []string, []*lockfile.LockFile, error) {
+	unifiedMounts, mountedImages, intermediateMounts, overlayMounts, targetLocks, err := getMounts(ctx, store, mountLabel, mounts, contextDir, workDir, tmpDir)
 	if err != nil {
-		return nil, nil, nil, nil, err
+		return nil, nil, nil, nil, nil, err
 	}
 	succeeded := false
 	defer func() {
 		if !succeeded {
-			for _, overlayDir := range overlayDirs {
-				if err := overlay.RemoveTemp(overlayDir); err != nil {
-					logrus.Debugf("unmounting overlay mount at %q: %v", overlayDir, err)
+			for _, overlayMount := range overlayMounts {
+				if err := overlay.RemoveTemp(overlayMount); err != nil {
+					logrus.Debugf("unmounting overlay at %q: %v", overlayMount, err)
 				}
 			}
-			for _, mountedImage := range mountedImages {
-				if _, err := store.UnmountImage(mountedImage, false); err != nil {
-					logrus.Debugf("unmounting image %q: %v", mountedImage, err)
+			for _, intermediateMount := range intermediateMounts {
+				if err := mount.Unmount(intermediateMount); err != nil {
+					logrus.Debugf("unmounting intermediate mount point %q: %v", intermediateMount, err)
+				}
+				if err := os.Remove(intermediateMount); err != nil {
+					logrus.Debugf("removing should-be-empty directory %q: %v", intermediateMount, err)
+				}
+			}
+			for _, image := range mountedImages {
+				if _, err := store.UnmountImage(image, false); err != nil {
+					logrus.Debugf("unmounting image %q: %v", image, err)
 				}
 			}
 			UnlockLockArray(targetLocks)
@@ -596,11 +673,11 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string, …
 	}()
 	volumeMounts, err := getVolumeMounts(volumes)
 	if err != nil {
-		return nil, nil, nil, nil, err
+		return nil, nil, nil, nil, nil, err
 	}
 	for dest, mount := range volumeMounts {
 		if _, ok := unifiedMounts[dest]; ok {
-			return nil, nil, nil, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
+			return nil, nil, nil, nil, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
 		}
 		unifiedMounts[dest] = mount
 	}
@@ -610,33 +687,51 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string, …
 		finalMounts = append(finalMounts, mount)
 	}
 	succeeded = true
-	return finalMounts, mountedImages, overlayDirs, targetLocks, nil
+	return finalMounts, mountedImages, intermediateMounts, overlayMounts, targetLocks, nil
 }

-// getMounts takes user-provided input from the --mount flag and creates OCI
-// spec mounts.
-// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
-// buildah run --mount type=tmpfs,target=/dev/shm ...
-//
-// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
-func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string, mounts []string, contextDir, workDir, tmpDir string) (map[string]specs.Mount, []string, []string, []*lockfile.LockFile, error) {
+// getMounts takes user-provided inputs from the --mount flag and returns a
+// slice of OCI spec mounts, a slice of mounted image IDs, a slice of other
+// mount locations, a slice of overlay mounts, and a slice of locks, or an
+// error.
+//
+// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
+// buildah run --mount type=cache,target=/var/cache ...
+// buildah run --mount type=tmpfs,target=/dev/shm ...
+//
+// The caller is expected to, after the command which uses the mounts exits,
+// unmount the overlay filesystems (if we mounted any), unmount the other
+// mounted filesystems and remove their mountpoints (if we provided any paths
+// to mountpoints), unmount any mounted images (if we provided the IDs of any),
+// and then unlock the locks we returned (either using UnlockLockArray() or by
+// iterating over them and unlocking them).
+func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string, mounts []string, contextDir, workDir, tmpDir string) (map[string]specs.Mount, []string, []string, []string, []*lockfile.LockFile, error) {
 	// If `type` is not set default to "bind"
 	mountType := define.TypeBind
 	finalMounts := make(map[string]specs.Mount, len(mounts))
 	mountedImages := make([]string, 0, len(mounts))
-	overlayDirs := make([]string, 0, len(mounts))
+	intermediateMounts := make([]string, 0, len(mounts))
+	overlayMounts := make([]string, 0, len(mounts))
 	targetLocks := make([]*lockfile.LockFile, 0, len(mounts))
 	succeeded := false
 	defer func() {
 		if !succeeded {
-			for _, overlayDir := range overlayDirs {
+			for _, overlayDir := range overlayMounts {
 				if err := overlay.RemoveTemp(overlayDir); err != nil {
 					logrus.Debugf("unmounting overlay mount at %q: %v", overlayDir, err)
 				}
 			}
-			for _, mountedImage := range mountedImages {
-				if _, err := store.UnmountImage(mountedImage, false); err != nil {
-					logrus.Debugf("unmounting image %q: %v", mountedImage, err)
+			for _, intermediateMount := range intermediateMounts {
+				if err := mount.Unmount(intermediateMount); err != nil {
+					logrus.Debugf("unmounting intermediate mount point %q: %v", intermediateMount, err)
+				}
+				if err := os.Remove(intermediateMount); err != nil {
+					logrus.Debugf("removing should-be-empty directory %q: %v", intermediateMount, err)
+				}
+			}
+			for _, image := range mountedImages {
+				if _, err := store.UnmountImage(image, false); err != nil {
+					logrus.Debugf("unmounting image %q: %v", image, err)
 				}
 			}
 			UnlockLockArray(targetLocks)
@@ -651,61 +746,67 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string, …
 	for _, mount := range mounts {
 		tokens := strings.Split(mount, ",")
 		if len(tokens) < 2 {
-			return nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+			return nil, nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
 		}
 		for _, field := range tokens {
 			if strings.HasPrefix(field, "type=") {
 				kv := strings.Split(field, "=")
 				if len(kv) != 2 {
-					return nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+					return nil, nil, nil, nil, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
 				}
 				mountType = kv[1]
 			}
 		}
 		switch mountType {
 		case define.TypeBind:
-			mount, image, overlayDir, err := GetBindMount(ctx, tokens, contextDir, store, mountLabel, nil, workDir, tmpDir)
+			mount, image, intermediateMount, overlayMount, err := GetBindMount(ctx, tokens, contextDir, store, mountLabel, nil, workDir, tmpDir)
 			if err != nil {
-				return nil, nil, nil, nil, err
+				return nil, nil, nil, nil, nil, err
 			}
 			if image != "" {
 				mountedImages = append(mountedImages, image)
 			}
-			if overlayDir != "" {
-				overlayDirs = append(overlayDirs, overlayDir)
+			if intermediateMount != "" {
+				intermediateMounts = append(intermediateMounts, intermediateMount)
+			}
+			if overlayMount != "" {
+				overlayMounts = append(overlayMounts, overlayMount)
 			}
 			if _, ok := finalMounts[mount.Destination]; ok {
-				return nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+				return nil, nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
 			}
 			finalMounts[mount.Destination] = mount
 		case TypeCache:
-			mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir)
+			mount, intermediateMount, tl, err := GetCacheMount(tokens, nil, workDir, tmpDir)
 			if err != nil {
-				return nil, nil, nil, nil, err
+				return nil, nil, nil, nil, nil, err
 			}
+			if intermediateMount != "" {
+				intermediateMounts = append(intermediateMounts, intermediateMount)
+			}
 			if tl != nil {
 				targetLocks = append(targetLocks, tl)
 			}
 			if _, ok := finalMounts[mount.Destination]; ok {
-				return nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+				return nil, nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
 			}
 			finalMounts[mount.Destination] = mount
 		case TypeTmpfs:
 			mount, err := GetTmpfsMount(tokens)
 			if err != nil {
-				return nil, nil, nil, nil, err
+				return nil, nil, nil, nil, nil, err
 			}
 			if _, ok := finalMounts[mount.Destination]; ok {
-				return nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+				return nil, nil, nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
 			}
 			finalMounts[mount.Destination] = mount
 		default:
-			return nil, nil, nil, nil, fmt.Errorf("invalid filesystem type %q", mountType)
+			return nil, nil, nil, nil, nil, fmt.Errorf("invalid filesystem type %q", mountType)
 		}
 	}

 	succeeded = true
-	return finalMounts, mountedImages, overlayDirs, targetLocks, nil
+	return finalMounts, mountedImages, intermediateMounts, overlayMounts, targetLocks, nil
 }

 // GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
run.go (2 changes)
@@ -184,6 +184,8 @@ type runMountArtifacts struct {
 	SSHAuthSock string
 	// Lock files, which should have their Unlock() methods called
 	TargetLocks []*lockfile.LockFile
+	// Intermediate mount points, which should be Unmount()ed and Remove()d
+	IntermediateMounts []string
 }

 // RunMountInfo are the available run mounts for this run
@@ -42,6 +42,7 @@ import (
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/reexec"
 	"github.com/containers/storage/pkg/unshare"
 	"github.com/opencontainers/go-digest"
@@ -1519,6 +1520,7 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []string, …
 	mountTargets := make([]string, 0, len(mounts))
 	tmpFiles := make([]string, 0, len(mounts))
 	mountImages := make([]string, 0, len(mounts))
+	intermediateMounts := make([]string, 0, len(mounts))
 	finalMounts := make([]specs.Mount, 0, len(mounts))
 	agents := make([]*sshagent.AgentServer, 0, len(mounts))
 	defaultSSHSock := ""
@@ -1538,6 +1540,14 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []string, …
 				b.Logger.Error(err.Error())
 			}
 		}
+		for _, intermediateMount := range intermediateMounts {
+			if err := mount.Unmount(intermediateMount); err != nil {
+				b.Logger.Errorf("unmounting %q: %v", intermediateMount, err)
+			}
+			if err := os.Remove(intermediateMount); err != nil {
+				b.Logger.Errorf("removing should-be-empty directory %q: %v", intermediateMount, err)
+			}
+		}
 		for _, mountImage := range mountImages {
 			if _, err := b.store.UnmountImage(mountImage, false); err != nil {
 				b.Logger.Error(err.Error())
@@ -1554,9 +1564,10 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []string, …
 	for _, mount := range mounts {
 		var mountSpec *specs.Mount
 		var err error
-		var envFile, image, bundleMountsDir, overlayDir string
+		var envFile, image, bundleMountsDir, overlayDir, intermediateMount string
 		var agent *sshagent.AgentServer
 		var tl *lockfile.LockFile

 		tokens := strings.Split(mount, ",")

 		// If `type` is not set default to TypeBind
@@ -1601,7 +1612,7 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []string, …
 					return nil, nil, err
 				}
 			}
-			mountSpec, image, overlayDir, err = b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir, bundleMountsDir)
+			mountSpec, image, intermediateMount, overlayDir, err = b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir, bundleMountsDir)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -1611,6 +1622,9 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []string, …
 			if overlayDir != "" {
 				overlayDirs = append(overlayDirs, overlayDir)
 			}
+			if intermediateMount != "" {
+				intermediateMounts = append(intermediateMounts, intermediateMount)
+			}
 			finalMounts = append(finalMounts, *mountSpec)
 		case "tmpfs":
 			mountSpec, err = b.getTmpfsMount(tokens, idMaps)
@@ -1619,10 +1633,18 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []string, …
 			}
 			finalMounts = append(finalMounts, *mountSpec)
 		case "cache":
-			mountSpec, tl, err = b.getCacheMount(tokens, sources.StageMountPoints, idMaps, sources.WorkDir)
+			if bundleMountsDir == "" {
+				if bundleMountsDir, err = os.MkdirTemp(bundlePath, "mounts"); err != nil {
+					return nil, nil, err
+				}
+			}
+			mountSpec, intermediateMount, tl, err = b.getCacheMount(tokens, sources.StageMountPoints, idMaps, sources.WorkDir, bundleMountsDir)
 			if err != nil {
 				return nil, nil, err
 			}
+			if intermediateMount != "" {
+				intermediateMounts = append(intermediateMounts, intermediateMount)
+			}
 			if tl != nil {
 				targetLocks = append(targetLocks, tl)
 			}
@@ -1646,32 +1668,33 @@ func (b *Builder) runSetupRunMounts(mountPoint, bundlePath string, mounts []string, …
 	}
 	succeeded = true
 	artifacts := &runMountArtifacts{
-		RunMountTargets: mountTargets,
-		RunOverlayDirs:  overlayDirs,
-		TmpFiles:        tmpFiles,
-		Agents:          agents,
-		MountedImages:   mountImages,
-		SSHAuthSock:     defaultSSHSock,
-		TargetLocks:     targetLocks,
+		RunMountTargets:    mountTargets,
+		RunOverlayDirs:     overlayDirs,
+		TmpFiles:           tmpFiles,
+		Agents:             agents,
+		MountedImages:      mountImages,
+		SSHAuthSock:        defaultSSHSock,
+		TargetLocks:        targetLocks,
+		IntermediateMounts: intermediateMounts,
 	}
 	return finalMounts, artifacts, nil
 }

-func (b *Builder) getBindMount(tokens []string, sys *types.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir, tmpDir string) (*specs.Mount, string, string, error) {
+func (b *Builder) getBindMount(tokens []string, sys *types.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir, tmpDir string) (*specs.Mount, string, string, string, error) {
 	if contextDir == "" {
-		return nil, "", "", errors.New("context directory for current run invocation is not configured")
+		return nil, "", "", "", errors.New("context directory for current run invocation is not configured")
 	}
 	var optionMounts []specs.Mount
-	mount, image, overlayDir, err := volumes.GetBindMount(sys, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir, tmpDir)
+	mount, image, intermediateMount, overlayMount, err := volumes.GetBindMount(sys, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir, tmpDir)
 	if err != nil {
-		return nil, image, overlayDir, err
+		return nil, "", "", "", err
 	}
 	optionMounts = append(optionMounts, mount)
 	volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
 	if err != nil {
-		return nil, image, overlayDir, err
+		return nil, "", "", "", err
 	}
-	return &volumes[0], image, overlayDir, nil
+	return &volumes[0], image, intermediateMount, overlayMount, nil
 }

 func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*specs.Mount, error) {
@@ -1945,9 +1968,9 @@ func (b *Builder) cleanupTempVolumes() {
 // cleanupRunMounts cleans up run mounts so they only appear in this run.
 func (b *Builder) cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error {
 	for _, agent := range artifacts.Agents {
-		err := agent.Shutdown()
-		if err != nil {
-			return err
+		servePath := agent.ServePath()
+		if err := agent.Shutdown(); err != nil {
+			return fmt.Errorf("shutting down SSH agent at %q: %v", servePath, err)
 		}
 	}
 	// clean up any overlays we mounted
@@ -1956,6 +1979,15 @@ func (b *Builder) cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error {
 			return err
 		}
 	}
+	// unmount anything that needs unmounting
+	for _, intermediateMount := range artifacts.IntermediateMounts {
+		if err := mount.Unmount(intermediateMount); err != nil && !errors.Is(err, os.ErrNotExist) {
+			return fmt.Errorf("unmounting %q: %w", intermediateMount, err)
+		}
+		if err := os.Remove(intermediateMount); err != nil && !errors.Is(err, os.ErrNotExist) {
+			return fmt.Errorf("removing should-be-empty directory %q: %w", intermediateMount, err)
+		}
+	}
 	// unmount any images we mounted for this run
 	for _, image := range artifacts.MountedImages {
 		if _, err := b.store.UnmountImage(image, false); err != nil {
@@ -351,9 +351,12 @@ func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Mount, …
 	return spec.Mounts, nil
 }

-// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
-func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*spec.Mount, *lockfile.LockFile, error) {
-	return nil, nil, errors.New("cache mounts not supported on freebsd")
+// If this succeeded, the caller would be expected to, after the command which
+// uses the mount exits, unmount the mounted filesystem and remove its
+// mountpoint (if we provided the path to its mountpoint), and release the lock
+// (if we took one).
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir, tmpDir string) (*specs.Mount, string, *lockfile.LockFile, error) {
+	return nil, "", nil, errors.New("cache mounts not supported on freebsd")
 }

 func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) {
run_linux.go (34 changes)
@@ -37,6 +37,7 @@ import (
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/unshare"
 	"github.com/docker/go-units"
@@ -1273,24 +1274,39 @@ func checkIdsGreaterThan5(ids []specs.LinuxIDMapping) bool {
 	return false
 }

-// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
-func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*specs.Mount, *lockfile.LockFile, error) {
+// Returns a Mount to add to the runtime spec's list of mounts, an optional
+// path of a mounted filesystem that needs to be unmounted, and an optional
+// lock, or an error.
+//
+// The caller is expected to, after the command which uses the mount exits,
+// unmount the mounted filesystem (if we provided the path to its mountpoint)
+// and remove its mountpoint, and release the lock (if we took one).
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir, tmpDir string) (*specs.Mount, string, *lockfile.LockFile, error) {
 	var optionMounts []specs.Mount
-	mount, targetLock, err := volumes.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir)
+	optionMount, intermediateMount, targetLock, err := volumes.GetCacheMount(tokens, stageMountPoints, workDir, tmpDir)
 	if err != nil {
-		return nil, nil, err
+		return nil, "", nil, err
 	}
 	succeeded := false
 	defer func() {
-		if !succeeded && targetLock != nil {
-			targetLock.Unlock()
+		if !succeeded {
+			if intermediateMount != "" {
+				if err := mount.Unmount(intermediateMount); err != nil {
+					b.Logger.Debugf("unmounting %q: %v", intermediateMount, err)
+				}
+				if err := os.Remove(intermediateMount); err != nil {
+					b.Logger.Debugf("removing should-be-empty directory %q: %v", intermediateMount, err)
+				}
+			}
+			if targetLock != nil {
+				targetLock.Unlock()
+			}
 		}
 	}()
-	optionMounts = append(optionMounts, mount)
+	optionMounts = append(optionMounts, optionMount)
 	volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
 	if err != nil {
-		return nil, nil, err
+		return nil, "", nil, err
 	}
 	succeeded = true
-	return &volumes[0], targetLock, nil
+	return &volumes[0], intermediateMount, targetLock, nil
 }
@@ -6707,8 +6707,8 @@ RUN --mount=type=cache,source=../../../../../../../../../../../$TEST_SCRATCH_DIR…
 ls -l /var/tmp && cat /var/tmp/file.txt
 EOF

-  run_buildah 1 build --no-cache ${TEST_SCRATCH_DIR}
-  expect_output --substring "cat: can't open '/var/tmp/file.txt': No such file or directory"
+  run_buildah 125 build --no-cache ${TEST_SCRATCH_DIR}
+  expect_output --substring "no such file or directory"

   mkdir ${TEST_SCRATCH_DIR}/cve20249675
   cat > ${TEST_SCRATCH_DIR}/cve20249675/Containerfile <<EOF
@@ -1,2 +1,3 @@
 FROM scratch
-COPY hello .
+COPY hello hello1 .
+COPY hello2 /subdir/hello
@@ -0,0 +1 @@
+hello1
@@ -460,8 +460,10 @@ function configure_and_check_user() {
   _prefetch alpine
   run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
   cid=$output
-  run_buildah run --mount type=bind,source=.,from=buildkitbase,target=/test${zflag} $cid cat /test/hello
-  expect_output --substring "hello"
+  run_buildah run --mount type=bind,source=.,from=buildkitbase,target=/test${zflag} $cid cat /test/hello1
+  expect_output --substring "hello1"
+  run_buildah run --mount type=bind,source=subdir,from=buildkitbase,target=/test${zflag} $cid cat /test/hello
+  expect_output --substring "hello2"
 }

 @test "run --mount=type=cache like buildkit" {
@@ -475,8 +477,8 @@ function configure_and_check_user() {
   _prefetch alpine
   run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
   cid=$output
-  run_buildah run --mount type=cache,target=/test${zflag} $cid sh -c 'echo "hello" > /test/hello && cat /test/hello'
-  run_buildah run --mount type=cache,target=/test${zflag} $cid cat /test/hello
+  run_buildah run --mount type=cache,target=/test${zflag} $cid sh -c 'mkdir -p /test/subdir && echo "hello" > /test/subdir/h.txt && cat /test/subdir/h.txt'
+  run_buildah run --mount type=cache,src=subdir,target=/test${zflag} $cid cat /test/h.txt
   expect_output --substring "hello"
 }