Fix TOCTOU error when bind and cache mounts use "src" values

Fix a time-of-check/time-of-use error when mounting type=bind and
type=cache directories that use a "src" flag.  A hostile writer could
use a concurrently-running stage or build to replace that "src" location
between the point when we had resolved possible symbolic links and when
runc/crun/whatever actually went to create the bind mount
(CVE-2024-11218).

Stop ignoring the "src" option for cache mounts when there's no "from"
option.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Signed-off-by: David Shea <dshea@redhat.com>
This commit is contained in:
Nalin Dahyabhai 2024-12-13 16:55:59 -05:00 committed by David Shea
parent d524a04ca1
commit 720db1bebd
11 changed files with 301 additions and 126 deletions

View File

@ -12,6 +12,7 @@ import (
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/mount"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -170,7 +171,7 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
if err != nil {
return errors.Wrapf(err, "error building system context")
}
mounts, mountedImages, _, lockedTargets, err := internalParse.GetVolumes(systemContext, store, builder.MountLabel, iopts.volumes, iopts.mounts, iopts.contextDir, tmpDir)
mounts, mountedImages, intermediateMounts, _, lockedTargets, err := internalParse.GetVolumes(systemContext, store, builder.MountLabel, iopts.volumes, iopts.mounts, iopts.contextDir, tmpDir)
if err != nil {
return err
}
@ -178,6 +179,14 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
if err := overlay.CleanupContent(tmpDir); err != nil {
logrus.Debugf("unmounting overlay mounts under %q: %v", tmpDir, err)
}
for _, intermediateMount := range intermediateMounts {
if err := mount.Unmount(intermediateMount); err != nil {
logrus.Debugf("unmounting mount %q: %v", intermediateMount, err)
}
if err := os.Remove(intermediateMount); err != nil {
logrus.Debugf("removing should-be-empty mount directory %q: %v", intermediateMount, err)
}
}
for _, mountedImage := range mountedImages {
if _, err := store.UnmountImage(mountedImage, false); err != nil {
logrus.Debugf("unmounting image %q: %v", mountedImage, err)

View File

@ -11,12 +11,14 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/buildah/internal"
internalUtil "github.com/containers/buildah/internal/util"
internalVolumes "github.com/containers/buildah/internal/volumes"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/common/pkg/parse"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/mount"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@ -106,9 +108,17 @@ func convertToOverlay(m specs.Mount, store storage.Store, mountLabel, tmpDir str
}
// GetBindMount parses a single bind mount entry from the --mount flag.
// Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty.
// Caller is expected to perform unmount of any mounted images
func GetBindMount(sys *types.SystemContext, args []string, contextDir string, store storage.Store, mountLabel string, additionalMountPoints map[string]internal.StageMountDetails, tmpDir string) (specs.Mount, string, string, error) {
//
// Returns a Mount to add to the runtime spec's list of mounts, the ID of the
// image we mounted if we mounted one, the path of a mounted location if one
// needs to be unmounted and removed, and the path of an overlay mount if one
// needs to be cleaned up, or an error.
//
// The caller is expected to, after the command which uses the mount exits,
// clean up the overlay filesystem (if we provided a path to it), unmount and
// remove the mountpoint for the mounted filesystem (if we provided the path to
// its mountpoint), and then unmount the image (if we mounted one).
func GetBindMount(sys *types.SystemContext, args []string, contextDir string, store storage.Store, mountLabel string, additionalMountPoints map[string]internal.StageMountDetails, tmpDir string) (specs.Mount, string, string, string, error) {
newMount := specs.Mount{
Type: TypeBind,
}
@ -138,26 +148,26 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
newMount.Options = append(newMount.Options, kv[0])
case "from":
if len(kv) == 1 {
return newMount, "", "", errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", "", "", errors.Wrapf(errBadOptionArg, kv[0])
}
fromImage = kv[1]
case "bind-propagation":
if len(kv) == 1 {
return newMount, "", "", errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", "", "", errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Options = append(newMount.Options, kv[1])
case "src", "source":
if len(kv) == 1 {
return newMount, "", "", errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", "", "", errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Source = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, "", "", errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", "", "", errors.Wrapf(errBadOptionArg, kv[0])
}
setDest = kv[1]
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, "", "", err
return newMount, "", "", "", err
}
newMount.Destination = kv[1]
case "consistency":
@ -165,7 +175,7 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
// and can thus be safely ignored.
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
default:
return newMount, "", "", errors.Wrapf(errBadMntOption, kv[0])
return newMount, "", "", "", errors.Wrapf(errBadMntOption, kv[0])
}
}
@ -189,12 +199,12 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
if mountPoint == "" {
image, err := internalUtil.LookupImage(sys, store, fromImage)
if err != nil {
return newMount, "", "", err
return newMount, "", "", "", err
}
mountPoint, err = image.Mount(context.Background(), nil, mountLabel)
if err != nil {
return newMount, "", "", err
return newMount, "", "", "", err
}
mountedImage = image.ID()
defer func() {
@ -215,7 +225,7 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
}
if setDest == "" {
return newMount, "", "", errBadVolDest
return newMount, "", "", "", errBadVolDest
}
// buildkit parity: support absolute path for sources from current build context
@ -223,40 +233,65 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st
// path should be /contextDir/specified path
evaluated, err := copier.Eval(contextDir, contextDir+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
if err != nil {
return newMount, "", "", err
return newMount, "", "", "", err
}
newMount.Source = evaluated
} else {
// looks like its coming from `build run --mount=type=bind` allow using absolute path
// error out if no source is set
if newMount.Source == "" {
return newMount, "", "", errBadVolSrc
return newMount, "", "", "", errBadVolSrc
}
if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
return newMount, "", "", err
return newMount, "", "", "", err
}
}
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, "", "", err
return newMount, "", "", "", err
}
newMount.Options = opts
var intermediateMount string
if contextDir != "" && newMount.Source != contextDir {
rel, err := filepath.Rel(contextDir, newMount.Source)
if err != nil {
return newMount, "", "", "", fmt.Errorf("computing pathname of bind subdirectory: %w", err)
}
if rel != "." && rel != "/" {
mnt, err := internalVolumes.BindFromChroot(contextDir, rel, tmpDir)
if err != nil {
return newMount, "", "", "", fmt.Errorf("sanitizing bind subdirectory %q: %w", newMount.Source, err)
}
logrus.Debugf("bind-mounted %q under %q to %q", rel, contextDir, mnt)
intermediateMount = mnt
newMount.Source = intermediateMount
}
}
overlayDir := ""
if mountedImage != "" || mountIsReadWrite(newMount) {
if newMount, overlayDir, err = convertToOverlay(newMount, store, mountLabel, tmpDir, 0, 0); err != nil {
return newMount, "", "", err
return newMount, "", "", "", err
}
}
succeeded = true
return newMount, mountedImage, overlayDir, nil
return newMount, mountedImage, intermediateMount, overlayDir, nil
}
// GetCacheMount parses a single cache mount entry from the --mount flag.
func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, []string, error) {
//
// Returns a Mount to add to the runtime spec's list of mounts, the path of a
// mounted filesystem if one needs to be unmounted, and an optional lock that
// needs to be released, or an error.
//
// The caller is expected to, after the command which uses the mount exits,
// unmount and remove the mountpoint of the mounted filesystem (if we provided
// the path to its mountpoint).
func GetCacheMount(args []string, additionalMountPoints map[string]internal.StageMountDetails, tmpDir string) (specs.Mount, string, []string, error) {
var err error
var mode uint64
lockedTargets := make([]string, 0)
@ -300,67 +335,71 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
sharing = kv[1]
case "bind-propagation":
if len(kv) == 1 {
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", nil, errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Options = append(newMount.Options, kv[1])
case "id":
if len(kv) == 1 {
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", nil, errors.Wrapf(errBadOptionArg, kv[0])
}
id = kv[1]
case "from":
if len(kv) == 1 {
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", nil, errors.Wrapf(errBadOptionArg, kv[0])
}
fromStage = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", nil, errors.Wrapf(errBadOptionArg, kv[0])
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, lockedTargets, err
return newMount, "", nil, err
}
newMount.Destination = kv[1]
setDest = true
case "src", "source":
if len(kv) == 1 {
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", nil, errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Source = kv[1]
case "mode":
if len(kv) == 1 {
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", nil, errors.Wrapf(errBadOptionArg, kv[0])
}
mode, err = strconv.ParseUint(kv[1], 8, 32)
if err != nil {
return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache mode")
return newMount, "", nil, errors.Wrapf(err, "Unable to parse cache mode")
}
case "uid":
if len(kv) == 1 {
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", nil, errors.Wrapf(errBadOptionArg, kv[0])
}
uid, err = strconv.Atoi(kv[1])
if err != nil {
return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache uid")
return newMount, "", nil, errors.Wrapf(err, "Unable to parse cache uid")
}
case "gid":
if len(kv) == 1 {
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
return newMount, "", nil, errors.Wrapf(errBadOptionArg, kv[0])
}
gid, err = strconv.Atoi(kv[1])
if err != nil {
return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache gid")
return newMount, "", nil, errors.Wrapf(err, "Unable to parse cache gid")
}
default:
return newMount, lockedTargets, errors.Wrapf(errBadMntOption, kv[0])
return newMount, "", nil, errors.Wrapf(errBadMntOption, kv[0])
}
}
if !setDest {
return newMount, lockedTargets, errBadVolDest
return newMount, "", nil, errBadVolDest
}
thisCacheRoot := ""
if fromStage != "" {
// do not create and use a cache directory on the host,
// instead use the location in the mounted stage or
// temporary directory as the cache
mountPoint := ""
if additionalMountPoints != nil {
if val, ok := additionalMountPoints[fromStage]; ok {
@ -372,14 +411,9 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
// Cache does not support using an image, so if no stage is found
// return with error
if mountPoint == "" {
return newMount, lockedTargets, fmt.Errorf("no stage or additional build context found with name %s", fromStage)
return newMount, "", nil, fmt.Errorf("no stage or additional build context found with name %s", fromStage)
}
// path should be /contextDir/specified path
evaluated, err := copier.Eval(mountPoint, mountPoint+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
if err != nil {
return newMount, nil, err
}
newMount.Source = evaluated
thisCacheRoot = mountPoint
} else {
// we need to create cache on host if no image is being used
@ -392,45 +426,58 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
// create cache on host if not present
err = os.MkdirAll(cacheParent, os.FileMode(0755))
if err != nil {
return newMount, lockedTargets, errors.Wrapf(err, "Unable to create build cache directory")
return newMount, "", nil, errors.Wrapf(err, "Unable to create build cache directory")
}
if id != "" {
// Don't let the user control where we place the directory.
dirID := digest.FromString(id).Encoded()[:16]
newMount.Source = filepath.Join(cacheParent, dirID)
thisCacheRoot = filepath.Join(cacheParent, dirID)
} else {
// Don't let the user control where we place the directory.
dirID := digest.FromString(newMount.Destination).Encoded()[:16]
newMount.Source = filepath.Join(cacheParent, dirID)
thisCacheRoot = filepath.Join(cacheParent, dirID)
}
idPair := idtools.IDPair{
UID: uid,
GID: gid,
}
//buildkit parity: change uid and gid if specified otheriwise keep `0`
err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
// buildkit parity: change uid and gid if specified, otherwise keep `0`
err = idtools.MkdirAllAndChownNew(thisCacheRoot, os.FileMode(mode), idPair)
if err != nil {
return newMount, lockedTargets, errors.Wrapf(err, "Unable to change uid,gid of cache directory")
return newMount, "", nil, errors.Wrapf(err, "Unable to change uid,gid of cache directory")
}
}
// path should be /mountPoint/specified path
evaluated, err := copier.Eval(thisCacheRoot, thisCacheRoot+string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
if err != nil {
return newMount, "", nil, err
}
newMount.Source = evaluated
succeeded := false
switch sharing {
case "locked":
// lock parent cache
lockfile, err := lockfile.GetLockfile(filepath.Join(newMount.Source, BuildahCacheLockfile))
if err != nil {
return newMount, lockedTargets, errors.Wrapf(err, "Unable to acquire lock when sharing mode is locked")
return newMount, "", nil, errors.Wrapf(err, "Unable to acquire lock when sharing mode is locked")
}
// Will be unlocked after the RUN step is executed.
lockfile.Lock()
lockedTargets = append(lockedTargets, filepath.Join(newMount.Source, BuildahCacheLockfile))
defer func() {
if !succeeded {
UnlockLockArray(lockedTargets)
}
}()
case "shared":
// do nothing since default is `shared`
break
default:
// error out for unknown values
return newMount, lockedTargets, errors.Wrapf(err, "Unrecognized value %q for field `sharing`", sharing)
return newMount, "", nil, errors.Wrapf(err, "Unrecognized value %q for field `sharing`", sharing)
}
// buildkit parity: default sharing should be shared
@ -448,11 +495,29 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, lockedTargets, err
return newMount, "", nil, err
}
newMount.Options = opts
return newMount, lockedTargets, nil
var intermediateMount string
if newMount.Source != thisCacheRoot {
rel, err := filepath.Rel(thisCacheRoot, newMount.Source)
if err != nil {
return newMount, "", nil, fmt.Errorf("computing pathname of cache subdirectory: %w", err)
}
if rel != "." && rel != "/" {
mnt, err := internalVolumes.BindFromChroot(thisCacheRoot, rel, tmpDir)
if err != nil {
return newMount, "", nil, fmt.Errorf("sanitizing cache subdirectory %q: %w", newMount.Source, err)
}
logrus.Debugf("bind-mounted %q under %q to %q", rel, thisCacheRoot, mnt)
intermediateMount = mnt
newMount.Source = intermediateMount
}
}
succeeded = true
return newMount, intermediateMount, lockedTargets, nil
}
// ValidateVolumeMountHostDir validates the host path of buildah --volume
@ -539,23 +604,40 @@ func Volume(volume string) (specs.Mount, error) {
return mount, nil
}
// GetVolumes gets the volumes from --volume and --mount
func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string, volumes []string, mounts []string, contextDir string, tmpDir string) ([]specs.Mount, []string, []string, []string, error) {
unifiedMounts, mountedImages, overlayDirs, lockedTargets, err := getMounts(ctx, store, mountLabel, mounts, contextDir, tmpDir)
// GetVolumes gets the volumes from --volume and --mount flags.
//
// Returns a slice of Mounts to add to the runtime spec's list of mounts, the
// IDs of any images we mounted, a slice of bind-mounted paths, a slice of
// overlay directories and a slice of locks that we acquired, or an error.
//
// The caller is expected to, after the command which uses the mounts and
// volumes exits, clean up the overlay directories, unmount and remove the
// mountpoints for the bind-mounted paths, unmount any images we mounted, and
// release the locks we returned if any.
func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string, volumes []string, mounts []string, contextDir string, tmpDir string) ([]specs.Mount, []string, []string, []string, []string, error) {
unifiedMounts, mountedImages, intermediateMounts, overlayMounts, lockedTargets, err := getMounts(ctx, store, mountLabel, mounts, contextDir, tmpDir)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, nil, nil, err
}
succeeded := false
defer func() {
if !succeeded {
for _, overlayDir := range overlayDirs {
if err := overlay.RemoveTemp(overlayDir); err != nil {
logrus.Debugf("unmounting overlay mount at %q: %v", overlayDir, err)
for _, overlayMount := range overlayMounts {
if err := overlay.RemoveTemp(overlayMount); err != nil {
logrus.Debugf("unmounting overlay mount at %q: %v", overlayMount, err)
}
}
for _, mountedImage := range mountedImages {
if _, err := store.UnmountImage(mountedImage, false); err != nil {
logrus.Debugf("unmounting image %q: %v", mountedImage, err)
for _, intermediateMount := range intermediateMounts {
if err := mount.Unmount(intermediateMount); err != nil {
logrus.Debugf("unmounting intermediate mount point %q: %v", intermediateMount, err)
}
if err := os.Remove(intermediateMount); err != nil {
logrus.Debugf("removing should-be-empty directory %q: %v", intermediateMount, err)
}
}
for _, image := range mountedImages {
if _, err := store.UnmountImage(image, false); err != nil {
logrus.Debugf("unmounting image %q: %v", image, err)
}
}
UnlockLockArray(lockedTargets)
@ -563,11 +645,11 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string
}()
volumeMounts, err := getVolumeMounts(volumes)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, nil, nil, err
}
for dest, mount := range volumeMounts {
if _, ok := unifiedMounts[dest]; ok {
return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, dest)
return nil, nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, dest)
}
unifiedMounts[dest] = mount
}
@ -577,29 +659,48 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, mountLabel string
finalMounts = append(finalMounts, mount)
}
succeeded = true
return finalMounts, mountedImages, overlayDirs, lockedTargets, nil
return finalMounts, mountedImages, intermediateMounts, overlayMounts, lockedTargets, nil
}
// getMounts takes user-provided input from the --mount flag and creates OCI
// spec mounts.
// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
// buildah run --mount type=tmpfs,target=/dev/shm ...
func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string, mounts []string, contextDir, tmpDir string) (map[string]specs.Mount, []string, []string, []string, error) {
// getMounts takes user-provided inputs from the --mount flag and returns a
// slice of OCI spec mounts, a slice of mounted image IDs, a slice of other
// mount locations, a slice of overlay mounts, and a slice of locks, or an
// error.
//
// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
// buildah run --mount type=cache,target=/var/cache ...
// buildah run --mount type=tmpfs,target=/dev/shm ...
//
// The caller is expected to, after the command which uses the mounts exits,
// unmount the overlay filesystems (if we mounted any), unmount the other
// mounted filesystems and remove their mountpoints (if we provided any paths
// to mountpoints), unmount any mounted images (if we provided the IDs of any),
// and then unlock the locks we returned if any.
func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string, mounts []string, contextDir, tmpDir string) (map[string]specs.Mount, []string, []string, []string, []string, error) {
finalMounts := make(map[string]specs.Mount, len(mounts))
mountedImages := make([]string, 0, len(mounts))
overlayDirs := make([]string, 0, len(mounts))
intermediateMounts := make([]string, 0, len(mounts))
overlayMounts := make([]string, 0, len(mounts))
lockedTargets := make([]string, 0, len(mounts))
succeeded := false
defer func() {
if !succeeded {
for _, overlayDir := range overlayDirs {
for _, overlayDir := range overlayMounts {
if err := overlay.RemoveTemp(overlayDir); err != nil {
logrus.Debugf("unmounting overlay mount at %q: %v", overlayDir, err)
}
}
for _, mountedImage := range mountedImages {
if _, err := store.UnmountImage(mountedImage, false); err != nil {
logrus.Debugf("unmounting image %q: %v", mountedImage, err)
for _, intermediateMount := range intermediateMounts {
if err := mount.Unmount(intermediateMount); err != nil {
logrus.Debugf("unmounting intermediate mount point %q: %v", intermediateMount, err)
}
if err := os.Remove(intermediateMount); err != nil {
logrus.Debugf("removing should-be-empty directory %q: %v", intermediateMount, err)
}
}
for _, image := range mountedImages {
if _, err := store.UnmountImage(image, false); err != nil {
logrus.Debugf("unmounting image %q: %v", image, err)
}
}
UnlockLockArray(lockedTargets)
@ -614,58 +715,64 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mountLabel string,
for _, mount := range mounts {
arr := strings.SplitN(mount, ",", 2)
if len(arr) < 2 {
return nil, nil, nil, nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
return nil, nil, nil, nil, nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
}
kv := strings.Split(arr[0], "=")
// TODO: type is not explicitly required in Docker.
// If not specified, it defaults to "volume".
if len(kv) != 2 || kv[0] != "type" {
return nil, nil, nil, nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
return nil, nil, nil, nil, nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
}
tokens := strings.Split(arr[1], ",")
switch kv[1] {
case TypeBind:
mount, image, overlayDir, err := GetBindMount(ctx, tokens, contextDir, store, mountLabel, nil, tmpDir)
mount, image, intermediateMount, overlayMount, err := GetBindMount(ctx, tokens, contextDir, store, mountLabel, nil, tmpDir)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, nil, nil, err
}
if image != "" {
mountedImages = append(mountedImages, image)
}
if overlayDir != "" {
overlayDirs = append(overlayDirs, overlayDir)
if intermediateMount != "" {
intermediateMounts = append(intermediateMounts, intermediateMount)
}
if overlayMount != "" {
overlayMounts = append(overlayMounts, overlayMount)
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
return nil, nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
}
finalMounts[mount.Destination] = mount
case TypeCache:
mount, lockedPaths, err := GetCacheMount(tokens, store, "", nil)
mount, intermediateMount, lockedPaths, err := GetCacheMount(tokens, nil, tmpDir)
lockedTargets = lockedPaths
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, nil, nil, err
}
if intermediateMount != "" {
intermediateMounts = append(intermediateMounts, intermediateMount)
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
return nil, nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
}
finalMounts[mount.Destination] = mount
case TypeTmpfs:
mount, err := GetTmpfsMount(tokens)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, nil, nil, err
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
return nil, nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
}
finalMounts[mount.Destination] = mount
default:
return nil, nil, nil, nil, errors.Errorf("invalid filesystem type %q", kv[1])
return nil, nil, nil, nil, nil, errors.Errorf("invalid filesystem type %q", kv[1])
}
}
succeeded = true
return finalMounts, mountedImages, overlayDirs, lockedTargets, nil
return finalMounts, mountedImages, intermediateMounts, overlayMounts, lockedTargets, nil
}
// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag

View File

@ -11,12 +11,12 @@ import (
"golang.org/x/sys/unix"
)
// bindFromChroot opens "path" inside of "root" using a chrooted subprocess
// BindFromChroot opens "path" inside of "root" using a chrooted subprocess
// that returns a descriptor, then creates a uniquely-named temporary directory
// or file under "tmp" and bind-mounts the opened descriptor to it, returning
// the path of the temporary file or directory. The caller is responsible for
// unmounting and removing the temporary.
func bindFromChroot(root, path, tmp string) (string, error) {
func BindFromChroot(root, path, tmp string) (string, error) {
fd, _, err := open.InChroot(root, "", path, unix.O_DIRECTORY|unix.O_RDONLY, 0)
if err != nil {
if !errors.Is(err, unix.ENOTDIR) {

View File

@ -20,11 +20,11 @@ func TestBindFromChroot(t *testing.T) {
require.NoError(t, os.Mkdir(filepath.Join(rootdir, "subdirectory"), 0o700), "creating bind mount source directory")
require.NoError(t, os.WriteFile(filepath.Join(rootdir, "subdirectory", "file"), []byte(contents1), 0o600))
require.NoError(t, os.WriteFile(filepath.Join(rootdir, "file"), []byte(contents2), 0o600))
subdir, err := bindFromChroot(rootdir, "subdirectory", destdir)
subdir, err := BindFromChroot(rootdir, "subdirectory", destdir)
require.NoError(t, err, "bind mounting from a directory")
bytes1, err := os.ReadFile(filepath.Join(subdir, "file"))
require.NoError(t, err, "reading file from bind-mounted directory")
subfile, err := bindFromChroot(rootdir, "file", destdir)
subfile, err := BindFromChroot(rootdir, "file", destdir)
require.NoError(t, err, "bind mounting from a file")
bytes2, err := os.ReadFile(subfile)
require.NoError(t, err, "reading file from bind mounted file")

View File

@ -4,12 +4,12 @@ package volumes
import "errors"
// bindFromChroot would open "path" inside of "root" using a chrooted
// BindFromChroot would open "path" inside of "root" using a chrooted
// subprocess that returns a descriptor, then would create a uniquely-named
// temporary directory or file under "tmp" and bind-mount the opened descriptor
// to it, returning the path of the temporary file or directory. The caller
// would be responsible for unmounting and removing the temporary. For now,
// this just returns an error because it is not implemented for this platform.
func bindFromChroot(root, path, tmp string) (string, error) {
func BindFromChroot(root, path, tmp string) (string, error) {
return "", errors.New("not available on this system")
}

2
run.go
View File

@ -179,4 +179,6 @@ type runMountArtifacts struct {
SSHAuthSock string
// LockedTargets to be unlocked if there are any.
LockedTargets []string
// Intermediate mount points, which should be Unmount()ed and Remove()d
IntermediateMounts []string
}

View File

@ -44,6 +44,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/unshare"
@ -2528,6 +2529,7 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, bundlePat
mountTargets := make([]string, 0, len(mounts))
tmpFiles := make([]string, 0, len(mounts))
mountImages := make([]string, 0, len(mounts))
intermediateMounts := make([]string, 0, len(mounts))
finalMounts := make([]specs.Mount, 0, len(mounts))
agents := make([]*sshagent.AgentServer, 0, len(mounts))
defaultSSHSock := ""
@ -2548,6 +2550,14 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, bundlePat
b.Logger.Error(err.Error())
}
}
for _, intermediateMount := range intermediateMounts {
if err := mount.Unmount(intermediateMount); err != nil {
b.Logger.Errorf("unmounting %q: %v", intermediateMount, err)
}
if err := os.Remove(intermediateMount); err != nil {
b.Logger.Errorf("removing should-be-empty directory %q: %v", intermediateMount, err)
}
}
for _, mountImage := range mountImages {
if _, err := b.store.UnmountImage(mountImage, false); err != nil {
b.Logger.Error(err.Error())
@ -2558,11 +2568,12 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, bundlePat
b.Logger.Error(err.Error())
}
}
internalParse.UnlockLockArray(lockedTargets)
}
}()
for _, mount := range mounts {
var err error
var image, bundleMountsDir, overlayDir string
var image, bundleMountsDir, overlayDir, intermediateMount string
arr := strings.SplitN(mount, ",", 2)
kv := strings.Split(arr[0], "=")
@ -2606,7 +2617,7 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, bundlePat
}
}
var mount *spec.Mount
mount, image, overlayDir, err = b.getBindMount(context, tokens, contextDir, rootUID, rootGID, processUID, processGID, stageMountPoints, bundleMountsDir)
mount, image, intermediateMount, overlayDir, err = b.getBindMount(context, tokens, contextDir, rootUID, rootGID, processUID, processGID, stageMountPoints, bundleMountsDir)
if err != nil {
return nil, nil, err
}
@ -2617,6 +2628,9 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, bundlePat
if overlayDir != "" {
overlayDirs = append(overlayDirs, overlayDir)
}
if intermediateMount != "" {
intermediateMounts = append(intermediateMounts, intermediateMount)
}
finalMounts = append(finalMounts, *mount)
case "tmpfs":
mount, err := b.getTmpfsMount(tokens, rootUID, rootGID, processUID, processGID)
@ -2626,10 +2640,18 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, bundlePat
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
case "cache":
mount, lockedPaths, err := b.getCacheMount(tokens, rootUID, rootGID, processUID, processGID, stageMountPoints)
if bundleMountsDir == "" {
if bundleMountsDir, err = os.MkdirTemp(bundlePath, "mounts"); err != nil {
return nil, nil, err
}
}
mount, intermediateMount, lockedPaths, err := b.getCacheMount(tokens, rootUID, rootGID, processUID, processGID, stageMountPoints, bundleMountsDir)
if err != nil {
return nil, nil, err
}
if intermediateMount != "" {
intermediateMounts = append(intermediateMounts, intermediateMount)
}
finalMounts = append(finalMounts, *mount)
mountTargets = append(mountTargets, mount.Destination)
lockedTargets = lockedPaths
@ -2638,33 +2660,34 @@ func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, bundlePat
}
}
artifacts := &runMountArtifacts{
RunMountTargets: mountTargets,
RunOverlayDirs: overlayDirs,
TmpFiles: tmpFiles,
Agents: agents,
MountedImages: mountImages,
SSHAuthSock: defaultSSHSock,
LockedTargets: lockedTargets,
RunMountTargets: mountTargets,
RunOverlayDirs: overlayDirs,
TmpFiles: tmpFiles,
Agents: agents,
MountedImages: mountImages,
SSHAuthSock: defaultSSHSock,
LockedTargets: lockedTargets,
IntermediateMounts: intermediateMounts,
}
succeeded = true
return finalMounts, artifacts, nil
}
func (b *Builder) getBindMount(sys *imagetypes.SystemContext, tokens []string, contextDir string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails, tmpDir string) (*spec.Mount, string, string, error) {
func (b *Builder) getBindMount(sys *imagetypes.SystemContext, tokens []string, contextDir string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails, tmpDir string) (*spec.Mount, string, string, string, error) {
if contextDir == "" {
return nil, "", "", errors.New("Context Directory for current run invocation is not configured")
return nil, "", "", "", errors.New("Context Directory for current run invocation is not configured")
}
var optionMounts []specs.Mount
mount, image, overlayDir, err := internalParse.GetBindMount(sys, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, tmpDir)
mount, image, intermediateMount, overlayMount, err := internalParse.GetBindMount(sys, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, tmpDir)
if err != nil {
return nil, image, overlayDir, err
return nil, "", "", "", err
}
optionMounts = append(optionMounts, mount)
volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID)
if err != nil {
return nil, image, overlayDir, err
return nil, "", "", "", err
}
return &volumes[0], image, overlayDir, nil
return &volumes[0], image, intermediateMount, overlayMount, nil
}
func (b *Builder) getTmpfsMount(tokens []string, rootUID, rootGID, processUID, processGID int) (*spec.Mount, error) {
@ -2681,18 +2704,39 @@ func (b *Builder) getTmpfsMount(tokens []string, rootUID, rootGID, processUID, p
return &volumes[0], nil
}
func (b *Builder) getCacheMount(tokens []string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, []string, error) {
// Returns a Mount to add to the runtime spec's list of mounts, the optional
// path of a filesystem that we mounted (and left mounted), and an optional
// list of locked targets, or an error.
//
// The caller is expected to, after the command which uses the mount exits,
// unmount the mounted filesystem (if we provided the path to its mountpoint)
// and remove its mountpoint, and release the lock (if we took one).
func (b *Builder) getCacheMount(tokens []string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails, tmpDir string) (*spec.Mount, string, []string, error) {
var optionMounts []specs.Mount
mount, lockedTargets, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints)
optionMount, intermediateMount, lockedTargets, err := internalParse.GetCacheMount(tokens, stageMountPoints, tmpDir)
if err != nil {
return nil, lockedTargets, err
return nil, "", nil, err
}
optionMounts = append(optionMounts, mount)
succeeded := false
defer func() {
if !succeeded {
if intermediateMount != "" {
if err := mount.Unmount(intermediateMount); err != nil {
b.Logger.Debugf("unmounting %q: %v", intermediateMount, err)
}
if err := os.Remove(intermediateMount); err != nil {
b.Logger.Debugf("removing should-be-empty directory %q: %v", intermediateMount, err)
}
}
internalParse.UnlockLockArray(lockedTargets)
}
}()
optionMounts = append(optionMounts, optionMount)
volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID)
if err != nil {
return nil, lockedTargets, err
return nil, "", nil, err
}
return &volumes[0], lockedTargets, nil
succeeded = true
return &volumes[0], intermediateMount, lockedTargets, nil
}
func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, string, error) {
@ -2926,9 +2970,9 @@ func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]
// cleanupRunMounts cleans up run mounts so they only appear in this run.
func (b *Builder) cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error {
for _, agent := range artifacts.Agents {
err := agent.Shutdown()
if err != nil {
return err
servePath := agent.ServePath()
if err := agent.Shutdown(); err != nil {
return fmt.Errorf("shutting down SSH agent at %q: %v", servePath, err)
}
}
@ -2938,6 +2982,15 @@ func (b *Builder) cleanupRunMounts(mountpoint string, artifacts *runMountArtifac
return err
}
}
// unmounting anything that needs unmounting
for _, intermediateMount := range artifacts.IntermediateMounts {
if err := mount.Unmount(intermediateMount); err != nil && !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("unmounting %q: %w", intermediateMount, err)
}
if err := os.Remove(intermediateMount); err != nil && !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("removing should-be-empty directory %q: %w", intermediateMount, err)
}
}
// unmount any images we mounted for this run
for _, image := range artifacts.MountedImages {
if _, err := b.store.UnmountImage(image, false); err != nil {

View File

@ -4649,8 +4649,8 @@ RUN --mount=type=cache,source=../../../../../../../../../../../$TEST_SCRATCH_DIR
ls -l /var/tmp && cat /var/tmp/file.txt
EOF
run_buildah 1 build --no-cache $searg ${TEST_SCRATCH_DIR}
expect_output --substring "cat: can't open '/var/tmp/file.txt': No such file or directory"
run_buildah 125 build --no-cache $searg ${TEST_SCRATCH_DIR}
expect_output --substring "no such file or directory"
mkdir ${TEST_SCRATCH_DIR}/cve20249675
cat > ${TEST_SCRATCH_DIR}/cve20249675/Containerfile <<EOF

View File

@ -1,2 +1,3 @@
FROM scratch
COPY hello .
COPY hello hello1 .
COPY hello2 /subdir/hello

View File

@ -0,0 +1 @@
hello1

View File

@ -427,8 +427,10 @@ function configure_and_check_user() {
_prefetch alpine
run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
cid=$output
run_buildah run --mount type=bind,source=.,from=buildkitbase,target=/test${zflag} $cid cat /test/hello
expect_output --substring "hello"
run_buildah run --mount type=bind,source=.,from=buildkitbase,target=/test${zflag} $cid cat /test/hello1
expect_output --substring "hello1"
run_buildah run --mount type=bind,source=subdir,from=buildkitbase,target=/test${zflag} $cid cat /test/hello
expect_output --substring "hello2"
}
@test "run --mount=type=cache like buildkit" {
@ -442,8 +444,8 @@ function configure_and_check_user() {
_prefetch alpine
run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
cid=$output
run_buildah run --mount type=cache,target=/test${zflag} $cid sh -c 'echo "hello" > /test/hello && cat /test/hello'
run_buildah run --mount type=cache,target=/test${zflag} $cid cat /test/hello
run_buildah run --mount type=cache,target=/test${zflag} $cid sh -c 'mkdir -p /test/subdir && echo "hello" > /test/subdir/h.txt && cat /test/subdir/h.txt'
run_buildah run --mount type=cache,src=subdir,target=/test${zflag} $cid cat /test/h.txt
expect_output --substring "hello"
}