2017-03-07 04:39:22 +08:00
package buildah
import (
2019-07-25 22:10:03 +08:00
"archive/tar"
2024-07-20 02:07:25 +08:00
"context"
"crypto/tls"
2022-07-06 17:14:06 +08:00
"errors"
2019-07-25 22:10:03 +08:00
"fmt"
2017-03-07 04:39:22 +08:00
"io"
"net/http"
2017-03-08 02:26:17 +08:00
"net/url"
2017-03-07 04:39:22 +08:00
"os"
"path"
"path/filepath"
2025-02-17 21:29:46 +08:00
"slices"
2021-02-24 07:14:04 +08:00
"strconv"
2017-03-07 04:39:22 +08:00
"strings"
2019-07-25 22:10:03 +08:00
"sync"
2017-07-17 22:42:58 +08:00
"syscall"
2017-03-28 15:01:59 +08:00
"time"
2017-03-07 04:39:22 +08:00
2019-07-25 22:10:03 +08:00
"github.com/containers/buildah/copier"
2021-02-07 06:49:40 +08:00
"github.com/containers/buildah/define"
2024-03-27 22:23:26 +08:00
"github.com/containers/buildah/internal/tmpdir"
2019-03-25 18:23:56 +08:00
"github.com/containers/buildah/pkg/chrootuser"
2024-07-20 02:07:25 +08:00
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
2019-05-30 07:56:28 +08:00
"github.com/containers/storage/pkg/fileutils"
2018-03-17 05:19:29 +08:00
"github.com/containers/storage/pkg/idtools"
2024-04-22 21:55:36 +08:00
"github.com/containers/storage/pkg/regexp"
2024-07-20 02:07:25 +08:00
"github.com/docker/go-connections/tlsconfig"
2019-07-25 22:10:03 +08:00
"github.com/hashicorp/go-multierror"
2024-10-23 08:24:51 +08:00
"github.com/moby/sys/userns"
2023-11-10 07:29:06 +08:00
digest "github.com/opencontainers/go-digest"
2025-06-14 02:26:23 +08:00
v1 "github.com/opencontainers/image-spec/specs-go/v1"
2017-11-30 22:34:02 +08:00
"github.com/opencontainers/runtime-spec/specs-go"
2017-10-10 03:05:56 +08:00
"github.com/sirupsen/logrus"
2017-03-07 04:39:22 +08:00
)
2018-06-08 22:52:52 +08:00
// AddAndCopyOptions holds options for add and copy commands.
type AddAndCopyOptions struct {
	// Chmod sets the access permissions of the destination content.
	Chmod string
	// Chown is a spec for the user who should be given ownership over the
	// newly-added content, potentially overriding permissions which would
	// otherwise be set to 0:0.
	Chown string
	// Checksum is a standard container digest string (e.g. <algorithm>:<digest>)
	// and is the expected hash of the content being copied.
	Checksum string
	// PreserveOwnership, if Chown is not set, tells us to avoid setting
	// ownership of copied items to 0:0, instead using whatever ownership
	// information is already set. Not meaningful for remote sources or
	// local archives that we extract.
	PreserveOwnership bool
	// All of the data being copied will pass through Hasher, if set.
	// If the sources are URLs or files, their contents will be passed to
	// Hasher.
	// If the sources include directory trees, Hasher will be passed
	// tar-format archives of the directory trees.
	Hasher io.Writer
	// Excludes is the contents of the .containerignore file.
	Excludes []string
	// IgnoreFile is the path to the .containerignore file.
	IgnoreFile string
	// ContextDir is the base directory for content being copied and
	// Excludes patterns.
	ContextDir string
	// IDMappingOptions are ID mapping options to use when contents to be
	// copied are part of another container, and need ownerships to be
	// mapped from the host to that container's values before copying them
	// into the container.
	IDMappingOptions *define.IDMappingOptions
	// DryRun indicates that the content should be digested, but not actually
	// copied into the container.
	DryRun bool
	// StripSetuidBit clears the setuid bit on items being copied. Has no
	// effect on archives being extracted, where the bit is always preserved.
	StripSetuidBit bool
	// StripSetgidBit clears the setgid bit on items being copied. Has no
	// effect on archives being extracted, where the bit is always preserved.
	StripSetgidBit bool
	// StripStickyBit clears the sticky bit on items being copied. Has no
	// effect on archives being extracted, where the bit is always preserved.
	StripStickyBit bool
	// CertPath, if not "", is a directory containing a CA certificate
	// (ending with ".crt"), a client certificate (ending with ".cert") and
	// a client certificate key (ending with ".key") used when downloading
	// sources from locations protected with TLS.
	CertPath string
	// InsecureSkipTLSVerify allows downloading sources from HTTPS where
	// TLS verification fails.
	InsecureSkipTLSVerify types.OptionalBool
	// MaxRetries is the maximum number of attempts we'll make to retrieve
	// contents from a remote location.
	MaxRetries int
	// RetryDelay is how long to wait before retrying attempts to retrieve
	// remote contents.
	RetryDelay time.Duration
	// Parents specifies that we should preserve either all of the parent
	// directories of source locations, or the ones which follow "/./" in
	// the source paths for source locations which include such a
	// component.
	Parents bool
	// Timestamp is a timestamp to override on all content as it is being read.
	Timestamp *time.Time
	// Link, when set to true, creates an independent layer containing the copied content
	// that sits on top of existing layers. This layer can be cached and reused
	// separately, and is not affected by filesystem changes from previous instructions.
	Link bool
	// BuildMetadata is consulted only when Link is true. Contains metadata used by
	// imagebuildah for cache evaluation of linked layers (inheritLabels, unsetAnnotations,
	// inheritAnnotations, newAnnotations). This field is internally managed and should
	// not be set by external API users.
	BuildMetadata string
}
2024-04-22 21:55:36 +08:00
// gitURLFragmentSuffix matches git URLs, with or without a fragment to use
// as the Git reference and build context from the Git repository, e.g.
//
//	github.com/containers/buildah.git
//	github.com/containers/buildah.git#main
//	github.com/containers/buildah.git#v1.35.0
var gitURLFragmentSuffix = regexp.Delayed(`\.git(?:#.+)?$`)
2024-03-27 22:23:26 +08:00
// sourceIsGit returns true if "source" is a git location.
func sourceIsGit ( source string ) bool {
2024-04-22 21:55:36 +08:00
return isURL ( source ) && gitURLFragmentSuffix . MatchString ( source )
}
// isURL reports whether "url" begins with an HTTP or HTTPS scheme.
func isURL(url string) bool {
	for _, scheme := range []string{"http://", "https://"} {
		if strings.HasPrefix(url, scheme) {
			return true
		}
	}
	return false
}
2024-04-22 21:55:36 +08:00
// sourceIsRemote returns true if "source" is a remote location
// and *not* a git repo. Certain github urls such as raw.github.* are allowed.
2019-07-25 22:10:03 +08:00
func sourceIsRemote ( source string ) bool {
2024-04-22 21:55:36 +08:00
return isURL ( source ) && ! gitURLFragmentSuffix . MatchString ( source )
2019-07-25 22:10:03 +08:00
}
// getURL writes a tar archive containing the named content to "writer". The
// archive holds exactly one regular file, whose contents are downloaded over
// HTTP(S) from "src".
//
// chown and chmod, when non-nil, set the ownership and permissions recorded
// in the tar header; mountpoint is where a temporary spool file may be
// created when the server doesn't report a Content-Length; renameTarget, if
// not empty, is used as the file's name in the archive instead of the URL's
// base name; srcDigest, if not empty, is the expected digest of the
// downloaded content; certPath and insecureSkipTLSVerify control TLS setup;
// timestamp, if non-nil, overrides the recorded modification time.
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest, certPath string, insecureSkipTLSVerify types.OptionalBool, timestamp *time.Time) error {
	url, err := url.Parse(src)
	if err != nil {
		return err
	}
	// Build the TLS configuration, loading any CA and client certificate
	// material from certPath, and honor the caller's request to skip
	// certificate verification.
	tlsClientConfig := &tls.Config{
		CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
	}
	if err := tlsclientconfig.SetupCertificates(certPath, tlsClientConfig); err != nil {
		return err
	}
	tlsClientConfig.InsecureSkipVerify = insecureSkipTLSVerify == types.OptionalBoolTrue
	tr := &http.Transport{TLSClientConfig: tlsClientConfig}
	httpClient := &http.Client{Transport: tr}
	response, err := httpClient.Get(src)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	// Reject anything outside of the 2xx/3xx status range.
	if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusBadRequest {
		return fmt.Errorf("invalid response status %d", response.StatusCode)
	}
	// Figure out what to name the new content.
	name := renameTarget
	if name == "" {
		name = path.Base(url.Path)
	}
	// If there's a date on the content, use it. If not, use the Unix epoch
	// or a specified value for compatibility.
	date := time.Unix(0, 0).UTC()
	if timestamp != nil {
		date = timestamp.UTC()
	} else {
		lastModified := response.Header.Get("Last-Modified")
		if lastModified != "" {
			d, err := time.Parse(time.RFC1123, lastModified)
			if err != nil {
				return fmt.Errorf("parsing last-modified time %q: %w", lastModified, err)
			}
			date = d.UTC()
		}
	}
	// Figure out the size of the content, which we need to know up front
	// because the tar header has to record it before we write the data.
	size := response.ContentLength
	var responseBody io.Reader = response.Body
	if size < 0 {
		// Create a temporary file and copy the content to it, so that
		// we can figure out how much content there is.
		f, err := os.CreateTemp(mountpoint, "download")
		if err != nil {
			return fmt.Errorf("creating temporary file to hold %q: %w", src, err)
		}
		defer os.Remove(f.Name())
		defer f.Close()
		size, err = io.Copy(f, response.Body)
		if err != nil {
			return fmt.Errorf("writing %q to temporary file %q: %w", src, f.Name(), err)
		}
		_, err = f.Seek(0, io.SeekStart)
		if err != nil {
			return fmt.Errorf("setting up to read %q from temporary file %q: %w", src, f.Name(), err)
		}
		responseBody = f
	}
	// If the caller supplied an expected digest, tee the content through a
	// digester as we copy it so we can verify it afterward.
	var digester digest.Digester
	if srcDigest != "" {
		digester = srcDigest.Algorithm().Digester()
		responseBody = io.TeeReader(responseBody, digester.Hash())
	}
	// Write the output archive. Set permissions for compatibility.
	tw := tar.NewWriter(writer)
	defer tw.Close()
	uid := 0
	gid := 0
	if chown != nil {
		uid = chown.UID
		gid = chown.GID
	}
	var mode int64 = 0o600
	if chmod != nil {
		mode = int64(*chmod)
	}
	hdr := tar.Header{
		Typeflag: tar.TypeReg,
		Name:     name,
		Size:     size,
		Uid:      uid,
		Gid:      gid,
		Mode:     mode,
		ModTime:  date,
	}
	err = tw.WriteHeader(&hdr)
	if err != nil {
		return fmt.Errorf("writing header: %w", err)
	}
	if _, err := io.Copy(tw, responseBody); err != nil {
		return fmt.Errorf("writing content from %q to tar stream: %w", src, err)
	}
	// Fail if the downloaded content didn't match the expected digest.
	if digester != nil {
		if responseDigest := digester.Digest(); responseDigest != srcDigest {
			return fmt.Errorf("unexpected response digest for %q: %s, want %s", src, responseDigest, srcDigest)
		}
	}
	return nil
}
2020-10-06 05:43:49 +08:00
// includeDirectoryAnyway returns true if "path" is a prefix for an exception
// known to "pm". If "path" is a directory that "pm" claims matches its list
// of patterns, but "pm"'s list of exclusions contains a pattern for which
// "path" is a prefix, then includeDirectoryAnyway() will return true.
// This is not always correct, because it relies on the directory part of any
// exception paths to be specified without wildcards.
func includeDirectoryAnyway(path string, pm *fileutils.PatternMatcher) bool {
	if !pm.Exclusions() {
		return false
	}
	sep := string(os.PathSeparator)
	dirPrefix := strings.TrimPrefix(path, sep) + sep
	for _, p := range pm.Patterns() {
		if !p.Exclusion() {
			continue
		}
		if strings.HasPrefix(strings.TrimPrefix(p.String(), sep), dirPrefix) {
			return true
		}
	}
	return false
}
2024-08-09 02:20:31 +08:00
// globbedToGlobbable takes a pathname which might include the '[', *, or ?
// characters, and converts it into a glob pattern that matches itself by
// marking the '[' characters as _not_ the beginning of match ranges and
// escaping the * and ? characters.
func globbedToGlobbable(glob string) string {
	// A single-pass replacer is equivalent to sequential ReplaceAll calls
	// here, since no replacement text contains another pattern that could
	// be re-matched.
	escaper := strings.NewReplacer(
		"[", "[[]",
		"?", "\\?",
		"*", "\\*",
	)
	return escaper.Replace(glob)
}
2025-02-17 21:29:46 +08:00
// getParentsPrefixToRemoveAndParentsToSkip gets from the pattern the prefix before the "pivot point",
// the location in the source path marked by the path component named "."
// (i.e. where "/./" occurs in the path). And list of parents to skip.
// In case "/./" is not present is returned "/".
func getParentsPrefixToRemoveAndParentsToSkip ( pattern string , contextDir string ) ( string , [ ] string ) {
prefix , _ , found := strings . Cut ( strings . TrimPrefix ( pattern , contextDir ) , "/./" )
if ! found {
return string ( filepath . Separator ) , [ ] string { }
}
prefix = strings . TrimPrefix ( filepath . Clean ( string ( filepath . Separator ) + prefix ) , string ( filepath . Separator ) )
out := [ ] string { }
parentPath := prefix
for parentPath != "/" && parentPath != "." {
out = append ( out , parentPath )
parentPath = filepath . Dir ( parentPath )
}
return prefix , out
}
2017-03-24 01:48:23 +08:00
// Add copies the contents of the specified sources into the container's root
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
2019-07-25 22:10:03 +08:00
func ( b * Builder ) Add ( destination string , extract bool , options AddAndCopyOptions , sources ... string ) error {
2017-11-22 21:57:31 +08:00
mountPoint , err := b . Mount ( b . MountLabel )
2017-03-24 01:48:23 +08:00
if err != nil {
return err
2017-03-07 04:39:22 +08:00
}
2017-03-24 01:48:23 +08:00
defer func ( ) {
if err2 := b . Unmount ( ) ; err2 != nil {
logrus . Errorf ( "error unmounting container: %v" , err2 )
}
} ( )
2019-07-25 22:10:03 +08:00
contextDir := options . ContextDir
2020-09-16 22:52:01 +08:00
currentDir := options . ContextDir
if options . ContextDir == "" {
2019-07-25 22:10:03 +08:00
contextDir = string ( os . PathSeparator )
2020-09-16 22:52:01 +08:00
currentDir , err = os . Getwd ( )
if err != nil {
2022-09-18 18:36:08 +08:00
return fmt . Errorf ( "determining current working directory: %w" , err )
2020-09-16 22:52:01 +08:00
}
2022-03-01 23:42:56 +08:00
} else {
if ! filepath . IsAbs ( options . ContextDir ) {
contextDir , err = filepath . Abs ( options . ContextDir )
if err != nil {
2022-09-18 18:36:08 +08:00
return fmt . Errorf ( "converting context directory path %q to an absolute path: %w" , options . ContextDir , err )
2022-03-01 23:42:56 +08:00
}
}
2018-03-17 05:19:29 +08:00
}
2019-07-25 22:10:03 +08:00
// Figure out what sorts of sources we have.
2024-03-27 22:23:26 +08:00
var localSources , remoteSources , gitSources [ ] string
2020-09-16 22:52:01 +08:00
for i , src := range sources {
2024-07-20 02:07:25 +08:00
if src == "" {
return errors . New ( "empty source location" )
}
2019-07-25 22:10:03 +08:00
if sourceIsRemote ( src ) {
remoteSources = append ( remoteSources , src )
continue
2019-05-26 20:56:13 +08:00
}
2024-03-27 22:23:26 +08:00
if sourceIsGit ( src ) {
gitSources = append ( gitSources , src )
continue
}
2020-09-16 22:52:01 +08:00
if ! filepath . IsAbs ( src ) && options . ContextDir == "" {
sources [ i ] = filepath . Join ( currentDir , src )
}
localSources = append ( localSources , sources [ i ] )
2019-07-25 22:10:03 +08:00
}
2024-03-27 22:23:26 +08:00
// Treat git sources as a subset of remote sources
// differentiating only in how we fetch the two later on.
if len ( gitSources ) > 0 {
remoteSources = append ( remoteSources , gitSources ... )
}
2019-07-25 22:10:03 +08:00
// Check how many items our local source specs matched. Each spec
// should have matched at least one item, otherwise we consider it an
// error.
var localSourceStats [ ] * copier . StatsForGlob
if len ( localSources ) > 0 {
statOptions := copier . StatOptions {
CheckForArchives : extract ,
2017-03-24 01:47:07 +08:00
}
2019-07-25 22:10:03 +08:00
localSourceStats , err = copier . Stat ( contextDir , contextDir , statOptions , localSources )
if err != nil {
2022-07-06 17:14:06 +08:00
return fmt . Errorf ( "checking on sources under %q: %w" , contextDir , err )
2017-03-28 03:35:09 +08:00
}
}
2019-07-25 22:10:03 +08:00
numLocalSourceItems := 0
for _ , localSourceStat := range localSourceStats {
if localSourceStat . Error != "" {
errorText := localSourceStat . Error
rel , err := filepath . Rel ( contextDir , localSourceStat . Glob )
if err != nil {
errorText = fmt . Sprintf ( "%v; %s" , err , errorText )
}
if strings . HasPrefix ( rel , ".." + string ( os . PathSeparator ) ) {
errorText = fmt . Sprintf ( "possible escaping context directory error: %s" , errorText )
}
2022-07-06 17:14:06 +08:00
return fmt . Errorf ( "checking on sources under %q: %v" , contextDir , errorText )
2017-03-24 01:49:38 +08:00
}
2019-07-25 22:10:03 +08:00
if len ( localSourceStat . Globbed ) == 0 {
2022-07-06 17:14:06 +08:00
return fmt . Errorf ( "checking source under %q: no glob matches: %w" , contextDir , syscall . ENOENT )
2019-07-25 22:10:03 +08:00
}
numLocalSourceItems += len ( localSourceStat . Globbed )
2017-03-24 01:49:38 +08:00
}
2024-03-27 22:23:26 +08:00
if numLocalSourceItems + len ( remoteSources ) + len ( gitSources ) == 0 {
2022-07-06 17:14:06 +08:00
return fmt . Errorf ( "no sources %v found: %w" , sources , syscall . ENOENT )
2017-03-08 02:26:17 +08:00
}
2019-03-20 02:28:54 +08:00
2019-07-25 22:10:03 +08:00
// Find out which user (and group) the destination should belong to.
var chownDirs , chownFiles * idtools . IDPair
2021-03-23 00:00:02 +08:00
var userUID , userGID uint32
2019-07-25 22:10:03 +08:00
if options . Chown != "" {
2021-03-23 00:00:02 +08:00
userUID , userGID , err = b . userForCopy ( mountPoint , options . Chown )
2019-07-25 22:10:03 +08:00
if err != nil {
2022-09-18 18:36:08 +08:00
return fmt . Errorf ( "looking up UID/GID for %q: %w" , options . Chown , err )
2019-07-25 22:10:03 +08:00
}
2019-03-20 02:28:54 +08:00
}
2021-02-24 07:14:04 +08:00
var chmodDirsFiles * os . FileMode
if options . Chmod != "" {
p , err := strconv . ParseUint ( options . Chmod , 8 , 32 )
if err != nil {
2022-09-18 18:36:08 +08:00
return fmt . Errorf ( "parsing chmod %q: %w" , options . Chmod , err )
2021-02-24 07:14:04 +08:00
}
perm := os . FileMode ( p )
chmodDirsFiles = & perm
}
2021-03-23 00:00:02 +08:00
chownDirs = & idtools . IDPair { UID : int ( userUID ) , GID : int ( userGID ) }
chownFiles = & idtools . IDPair { UID : int ( userUID ) , GID : int ( userGID ) }
2020-08-21 23:42:49 +08:00
if options . Chown == "" && options . PreserveOwnership {
chownDirs = nil
chownFiles = nil
}
2019-03-20 02:28:54 +08:00
2019-07-25 22:10:03 +08:00
// If we have a single source archive to extract, or more than one
// source item, or the destination has a path separator at the end of
// it, and it's not a remote URL, the destination needs to be a
// directory.
2025-08-01 23:54:27 +08:00
destMustBeDirectory := strings . HasSuffix ( destination , string ( os . PathSeparator ) ) || strings . HasSuffix ( destination , string ( os . PathSeparator ) + "." ) // keep this in sync with github.com/openshift/imagebuilder.hasSlash()
destMustBeDirectory = destMustBeDirectory || destination == "" || ( len ( sources ) > 1 )
2019-07-25 22:10:03 +08:00
if destination == "" || ! filepath . IsAbs ( destination ) {
tmpDestination := filepath . Join ( string ( os . PathSeparator ) + b . WorkDir ( ) , destination )
2025-08-01 23:54:27 +08:00
if destMustBeDirectory {
2019-07-25 22:10:03 +08:00
destination = tmpDestination + string ( os . PathSeparator )
} else {
destination = tmpDestination
}
2019-03-20 02:28:54 +08:00
}
2025-08-01 23:54:27 +08:00
destMustBeDirectory = destMustBeDirectory || ( filepath . Clean ( destination ) == filepath . Clean ( b . WorkDir ( ) ) )
2019-07-25 22:10:03 +08:00
destCanBeFile := false
if len ( sources ) == 1 {
if len ( remoteSources ) == 1 {
destCanBeFile = sourceIsRemote ( sources [ 0 ] )
}
if len ( localSources ) == 1 {
item := localSourceStats [ 0 ] . Results [ localSourceStats [ 0 ] . Globbed [ 0 ] ]
if item . IsDir || ( item . IsArchive && extract ) {
destMustBeDirectory = true
}
if item . IsRegular {
destCanBeFile = true
2019-03-20 02:28:54 +08:00
}
}
2024-03-27 22:23:26 +08:00
if len ( gitSources ) > 0 {
destMustBeDirectory = true
}
2019-03-20 02:28:54 +08:00
}
2019-07-25 22:10:03 +08:00
// We care if the destination either doesn't exist, or exists and is a
// file. If the source can be a single file, for those cases we treat
// the destination as a file rather than as a directory tree.
renameTarget := ""
extractDirectory := filepath . Join ( mountPoint , destination )
statOptions := copier . StatOptions {
CheckForArchives : extract ,
2019-05-30 07:56:28 +08:00
}
2019-07-25 22:10:03 +08:00
destStats , err := copier . Stat ( mountPoint , filepath . Join ( mountPoint , b . WorkDir ( ) ) , statOptions , [ ] string { extractDirectory } )
if err != nil {
2022-09-18 18:36:08 +08:00
return fmt . Errorf ( "checking on destination %v: %w" , extractDirectory , err )
2020-01-09 00:02:05 +08:00
}
2019-07-25 22:10:03 +08:00
if ( len ( destStats ) == 0 || len ( destStats [ 0 ] . Globbed ) == 0 ) && ! destMustBeDirectory && destCanBeFile {
// destination doesn't exist - extract to parent and rename the incoming file to the destination's name
renameTarget = filepath . Base ( extractDirectory )
extractDirectory = filepath . Dir ( extractDirectory )
2019-03-20 02:28:54 +08:00
}
2021-02-07 09:20:48 +08:00
// if the destination is a directory that doesn't yet exist, let's copy it.
2025-04-01 08:53:38 +08:00
newDestDirFound := ( len ( destStats ) == 1 || len ( destStats [ 0 ] . Globbed ) == 0 ) && destMustBeDirectory && ! destCanBeFile
2021-02-07 09:20:48 +08:00
2019-07-25 22:10:03 +08:00
if len ( destStats ) == 1 && len ( destStats [ 0 ] . Globbed ) == 1 && destStats [ 0 ] . Results [ destStats [ 0 ] . Globbed [ 0 ] ] . IsRegular {
if destMustBeDirectory {
2022-07-06 17:14:06 +08:00
return fmt . Errorf ( "destination %v already exists but is not a directory" , destination )
2019-07-25 22:10:03 +08:00
}
// destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
renameTarget = filepath . Base ( extractDirectory )
extractDirectory = filepath . Dir ( extractDirectory )
2019-03-20 02:28:54 +08:00
}
2019-07-25 22:10:03 +08:00
pm , err := fileutils . NewPatternMatcher ( options . Excludes )
2019-04-04 04:32:12 +08:00
if err != nil {
2022-09-18 18:36:08 +08:00
return fmt . Errorf ( "processing excludes list %v: %w" , options . Excludes , err )
2019-04-04 04:32:12 +08:00
}
2019-05-30 07:56:28 +08:00
2021-02-17 02:40:23 +08:00
// Make sure that, if it's a symlink, we'll chroot to the target of the link;
// knowing that target requires that we resolve it within the chroot.
evalOptions := copier . EvalOptions { }
evaluated , err := copier . Eval ( mountPoint , extractDirectory , evalOptions )
if err != nil {
2022-09-18 18:36:08 +08:00
return fmt . Errorf ( "checking on destination %v: %w" , extractDirectory , err )
2021-02-17 02:40:23 +08:00
}
extractDirectory = evaluated
// Set up ID maps.
2019-07-25 22:10:03 +08:00
var srcUIDMap , srcGIDMap [ ] idtools . IDMap
if options . IDMappingOptions != nil {
srcUIDMap , srcGIDMap = convertRuntimeIDMaps ( options . IDMappingOptions . UIDMap , options . IDMappingOptions . GIDMap )
}
destUIDMap , destGIDMap := convertRuntimeIDMaps ( b . IDMappingOptions . UIDMap , b . IDMappingOptions . GIDMap )
2025-06-14 02:26:23 +08:00
var putRoot , putDir , stagingDir string
var createdDirs [ ] string
var latestTimestamp time . Time
2021-02-17 02:40:23 +08:00
mkdirOptions := copier . MkdirOptions {
UIDMap : destUIDMap ,
GIDMap : destGIDMap ,
ChownNew : chownDirs ,
}
2025-06-14 02:26:23 +08:00
// If --link is specified, we create a staging directory to hold the content
// that will then become an independent layer
if options . Link {
containerDir , err := b . store . ContainerDirectory ( b . ContainerID )
if err != nil {
return fmt . Errorf ( "getting container directory for %q: %w" , b . ContainerID , err )
}
stagingDir , err = os . MkdirTemp ( containerDir , "link-stage-" )
if err != nil {
return fmt . Errorf ( "creating staging directory for link %q: %w" , b . ContainerID , err )
}
putRoot = stagingDir
cleanDest := filepath . Clean ( destination )
if strings . Contains ( cleanDest , ".." ) {
return fmt . Errorf ( "invalid destination path %q: contains path traversal" , destination )
}
if renameTarget != "" {
putDir = filepath . Dir ( filepath . Join ( stagingDir , cleanDest ) )
} else {
putDir = filepath . Join ( stagingDir , cleanDest )
}
putDirAbs , err := filepath . Abs ( putDir )
if err != nil {
return fmt . Errorf ( "failed to resolve absolute path: %w" , err )
}
stagingDirAbs , err := filepath . Abs ( stagingDir )
if err != nil {
return fmt . Errorf ( "failed to resolve staging directory absolute path: %w" , err )
}
if ! strings . HasPrefix ( putDirAbs , stagingDirAbs + string ( os . PathSeparator ) ) && putDirAbs != stagingDirAbs {
return fmt . Errorf ( "destination path %q escapes staging directory" , destination )
}
if err := copier . Mkdir ( putRoot , putDirAbs , mkdirOptions ) ; err != nil {
return fmt . Errorf ( "ensuring target directory exists: %w" , err )
}
tempPath := putDir
for tempPath != stagingDir && tempPath != filepath . Dir ( tempPath ) {
if _ , err := os . Stat ( tempPath ) ; err == nil {
createdDirs = append ( createdDirs , tempPath )
}
tempPath = filepath . Dir ( tempPath )
}
} else {
if err := copier . Mkdir ( mountPoint , extractDirectory , mkdirOptions ) ; err != nil {
return fmt . Errorf ( "ensuring target directory exists: %w" , err )
}
putRoot = extractDirectory
putDir = extractDirectory
2021-02-17 02:40:23 +08:00
}
2025-06-14 02:26:23 +08:00
2021-02-17 02:40:23 +08:00
// Copy each source in turn.
2019-07-25 22:10:03 +08:00
for _ , src := range sources {
var multiErr * multierror . Error
var getErr , closeErr , renameErr , putErr error
var wg sync . WaitGroup
2024-03-27 22:23:26 +08:00
if sourceIsRemote ( src ) || sourceIsGit ( src ) {
2019-07-25 22:10:03 +08:00
pipeReader , pipeWriter := io . Pipe ( )
2023-11-10 07:29:06 +08:00
var srcDigest digest . Digest
if options . Checksum != "" {
srcDigest , err = digest . Parse ( options . Checksum )
if err != nil {
return fmt . Errorf ( "invalid checksum flag: %w" , err )
}
}
2024-03-27 22:23:26 +08:00
2019-07-25 22:10:03 +08:00
wg . Add ( 1 )
2024-03-27 22:23:26 +08:00
if sourceIsGit ( src ) {
go func ( ) {
2025-03-26 02:15:59 +08:00
defer wg . Done ( )
defer pipeWriter . Close ( )
2025-01-11 05:14:33 +08:00
var cloneDir , subdir string
cloneDir , subdir , getErr = define . TempDirForURL ( tmpdir . GetTempDir ( ) , "" , src )
2025-03-26 02:15:59 +08:00
if getErr != nil {
return
}
2024-03-27 22:23:26 +08:00
getOptions := copier . GetOptions {
UIDMap : srcUIDMap ,
GIDMap : srcGIDMap ,
Excludes : options . Excludes ,
ExpandArchives : extract ,
ChownDirs : chownDirs ,
ChmodDirs : chmodDirsFiles ,
ChownFiles : chownFiles ,
ChmodFiles : chmodDirsFiles ,
StripSetuidBit : options . StripSetuidBit ,
StripSetgidBit : options . StripSetgidBit ,
StripStickyBit : options . StripStickyBit ,
2025-04-30 04:48:01 +08:00
Timestamp : options . Timestamp ,
2024-03-27 22:23:26 +08:00
}
2024-04-22 21:55:36 +08:00
writer := io . WriteCloser ( pipeWriter )
2025-01-11 05:14:33 +08:00
repositoryDir := filepath . Join ( cloneDir , subdir )
getErr = copier . Get ( repositoryDir , repositoryDir , getOptions , [ ] string { "." } , writer )
2024-03-27 22:23:26 +08:00
} ( )
} else {
go func ( ) {
2024-04-22 21:55:36 +08:00
getErr = retry . IfNecessary ( context . TODO ( ) , func ( ) error {
2025-04-30 04:48:01 +08:00
return getURL ( src , chownFiles , mountPoint , renameTarget , pipeWriter , chmodDirsFiles , srcDigest , options . CertPath , options . InsecureSkipTLSVerify , options . Timestamp )
2024-04-22 21:55:36 +08:00
} , & retry . Options {
MaxRetry : options . MaxRetries ,
Delay : options . RetryDelay ,
} )
2024-03-27 22:23:26 +08:00
pipeWriter . Close ( )
wg . Done ( )
} ( )
}
2019-07-25 22:10:03 +08:00
wg . Add ( 1 )
go func ( ) {
b . ContentDigester . Start ( "" )
hashCloser := b . ContentDigester . Hash ( )
hasher := io . Writer ( hashCloser )
if options . Hasher != nil {
hasher = io . MultiWriter ( hasher , options . Hasher )
}
if options . DryRun {
_ , putErr = io . Copy ( hasher , pipeReader )
} else {
putOptions := copier . PutOptions {
2021-01-05 05:44:30 +08:00
UIDMap : destUIDMap ,
GIDMap : destGIDMap ,
ChownDirs : nil ,
ChmodDirs : nil ,
ChownFiles : nil ,
ChmodFiles : nil ,
2024-10-23 08:24:51 +08:00
IgnoreDevices : userns . RunningInUserNS ( ) ,
2019-07-25 22:10:03 +08:00
}
2025-06-14 02:26:23 +08:00
putErr = copier . Put ( putRoot , putDir , putOptions , io . TeeReader ( pipeReader , hasher ) )
2019-07-25 22:10:03 +08:00
}
hashCloser . Close ( )
pipeReader . Close ( )
wg . Done ( )
} ( )
wg . Wait ( )
if getErr != nil {
2022-09-18 18:36:08 +08:00
getErr = fmt . Errorf ( "reading %q: %w" , src , getErr )
2017-03-07 04:39:22 +08:00
}
2019-07-25 22:10:03 +08:00
if putErr != nil {
2022-09-18 18:36:08 +08:00
putErr = fmt . Errorf ( "storing %q: %w" , src , putErr )
2017-03-24 01:49:38 +08:00
}
2019-07-25 22:10:03 +08:00
multiErr = multierror . Append ( getErr , putErr )
if multiErr != nil && multiErr . ErrorOrNil ( ) != nil {
if len ( multiErr . Errors ) > 1 {
return multiErr . ErrorOrNil ( )
}
return multiErr . Errors [ 0 ]
2017-11-30 22:34:02 +08:00
}
2017-03-07 04:39:22 +08:00
continue
}
2017-07-17 22:42:58 +08:00
2023-11-10 07:29:06 +08:00
if options . Checksum != "" {
return fmt . Errorf ( "checksum flag is not supported for local sources" )
}
2019-07-25 22:10:03 +08:00
// Dig out the result of running glob+stat on this source spec.
var localSourceStat * copier . StatsForGlob
for _ , st := range localSourceStats {
if st . Glob == src {
localSourceStat = st
break
}
2017-03-07 04:39:22 +08:00
}
2019-07-25 22:10:03 +08:00
if localSourceStat == nil {
2021-04-19 18:22:41 +08:00
continue
2017-07-17 22:42:58 +08:00
}
2019-07-25 22:10:03 +08:00
// Iterate through every item that matched the glob.
itemsCopied := 0
2024-08-09 02:20:31 +08:00
for _ , globbed := range localSourceStat . Globbed {
rel := globbed
if filepath . IsAbs ( globbed ) {
if rel , err = filepath . Rel ( contextDir , globbed ) ; err != nil {
return fmt . Errorf ( "computing path of %q relative to %q: %w" , globbed , contextDir , err )
2023-09-16 04:15:24 +08:00
}
2018-09-12 21:28:38 +08:00
}
2019-07-25 22:10:03 +08:00
if strings . HasPrefix ( rel , ".." + string ( os . PathSeparator ) ) {
2024-08-09 02:20:31 +08:00
return fmt . Errorf ( "possible escaping context directory error: %q is outside of %q" , globbed , contextDir )
2017-03-07 04:39:22 +08:00
}
2019-07-25 22:10:03 +08:00
// Check for dockerignore-style exclusion of this item.
if rel != "." {
2025-04-01 14:29:12 +08:00
excluded , err := pm . Matches ( filepath . ToSlash ( rel ) ) //nolint:staticcheck
2019-07-25 22:10:03 +08:00
if err != nil {
2024-08-09 02:20:31 +08:00
return fmt . Errorf ( "checking if %q(%q) is excluded: %w" , globbed , rel , err )
2017-07-17 22:42:58 +08:00
}
2020-10-06 05:43:49 +08:00
if excluded {
// non-directories that are excluded are excluded, no question, but
// directories can only be skipped if we don't have to allow for the
// possibility of finding things to include under them
2024-08-09 02:20:31 +08:00
globInfo := localSourceStat . Results [ globbed ]
2020-10-06 05:43:49 +08:00
if ! globInfo . IsDir || ! includeDirectoryAnyway ( rel , pm ) {
continue
}
2021-02-07 09:20:48 +08:00
} else {
// if the destination is a directory that doesn't yet exist, and is not excluded, let's copy it.
if newDestDirFound {
itemsCopied ++
}
2019-03-20 02:28:54 +08:00
}
2020-10-06 05:43:49 +08:00
} else {
// Make sure we don't trigger a "copied nothing" error for an empty context
// directory if we were told to copy the context directory itself. We won't
// actually copy it, but we need to make sure that we don't produce an error
// due to potentially not having anything in the tarstream that we passed.
itemsCopied ++
2017-03-07 04:39:22 +08:00
}
2024-08-09 02:20:31 +08:00
st := localSourceStat . Results [ globbed ]
2025-06-14 02:26:23 +08:00
if options . Link && st . ModTime . After ( latestTimestamp ) {
latestTimestamp = st . ModTime
}
2019-07-25 22:10:03 +08:00
pipeReader , pipeWriter := io . Pipe ( )
wg . Add ( 1 )
go func ( ) {
renamedItems := 0
writer := io . WriteCloser ( pipeWriter )
if renameTarget != "" {
writer = newTarFilterer ( writer , func ( hdr * tar . Header ) ( bool , bool , io . Reader ) {
hdr . Name = renameTarget
renamedItems ++
return false , false , nil
} )
2020-01-06 21:34:12 +08:00
}
2025-02-17 21:29:46 +08:00
if options . Parents {
parentsPrefixToRemove , parentsToSkip := getParentsPrefixToRemoveAndParentsToSkip ( src , options . ContextDir )
writer = newTarFilterer ( writer , func ( hdr * tar . Header ) ( bool , bool , io . Reader ) {
if slices . Contains ( parentsToSkip , hdr . Name ) && hdr . Typeflag == tar . TypeDir {
return true , false , nil
}
hdr . Name = strings . TrimPrefix ( hdr . Name , parentsPrefixToRemove )
hdr . Name = strings . TrimPrefix ( hdr . Name , "/" )
if hdr . Typeflag == tar . TypeLink {
hdr . Linkname = strings . TrimPrefix ( hdr . Linkname , parentsPrefixToRemove )
hdr . Linkname = strings . TrimPrefix ( hdr . Linkname , "/" )
}
if hdr . Name == "" {
return true , false , nil
}
return false , false , nil
} )
}
2024-08-07 03:07:02 +08:00
writer = newTarFilterer ( writer , func ( _ * tar . Header ) ( bool , bool , io . Reader ) {
2020-10-06 05:43:49 +08:00
itemsCopied ++
return false , false , nil
} )
2019-07-25 22:10:03 +08:00
getOptions := copier . GetOptions {
UIDMap : srcUIDMap ,
GIDMap : srcGIDMap ,
Excludes : options . Excludes ,
ExpandArchives : extract ,
2020-09-30 03:01:27 +08:00
ChownDirs : chownDirs ,
2021-02-24 07:14:04 +08:00
ChmodDirs : chmodDirsFiles ,
2020-09-30 03:01:27 +08:00
ChownFiles : chownFiles ,
2021-02-24 07:14:04 +08:00
ChmodFiles : chmodDirsFiles ,
2019-07-25 22:10:03 +08:00
StripSetuidBit : options . StripSetuidBit ,
StripSetgidBit : options . StripSetgidBit ,
StripStickyBit : options . StripStickyBit ,
2025-02-17 21:29:46 +08:00
Parents : options . Parents ,
2025-04-30 04:48:01 +08:00
Timestamp : options . Timestamp ,
2020-01-06 21:34:12 +08:00
}
2024-08-09 02:20:31 +08:00
getErr = copier . Get ( contextDir , contextDir , getOptions , [ ] string { globbedToGlobbable ( globbed ) } , writer )
2019-07-25 22:10:03 +08:00
closeErr = writer . Close ( )
if renameTarget != "" && renamedItems > 1 {
2022-07-06 17:14:06 +08:00
renameErr = fmt . Errorf ( "internal error: renamed %d items when we expected to only rename 1" , renamedItems )
2019-07-25 22:10:03 +08:00
}
wg . Done ( )
} ( )
wg . Add ( 1 )
go func ( ) {
if st . IsDir {
b . ContentDigester . Start ( "dir" )
} else {
b . ContentDigester . Start ( "file" )
2017-07-17 22:42:58 +08:00
}
2019-07-25 22:10:03 +08:00
hashCloser := b . ContentDigester . Hash ( )
hasher := io . Writer ( hashCloser )
if options . Hasher != nil {
hasher = io . MultiWriter ( hasher , options . Hasher )
}
if options . DryRun {
_ , putErr = io . Copy ( hasher , pipeReader )
} else {
putOptions := copier . PutOptions {
2020-09-30 03:01:27 +08:00
UIDMap : destUIDMap ,
GIDMap : destGIDMap ,
DefaultDirOwner : chownDirs ,
DefaultDirMode : nil ,
ChownDirs : nil ,
ChmodDirs : nil ,
ChownFiles : nil ,
ChmodFiles : nil ,
2024-10-23 08:24:51 +08:00
IgnoreDevices : userns . RunningInUserNS ( ) ,
2019-07-25 22:10:03 +08:00
}
2025-06-14 02:26:23 +08:00
putErr = copier . Put ( putRoot , putDir , putOptions , io . TeeReader ( pipeReader , hasher ) )
2017-07-17 22:42:58 +08:00
}
2019-07-25 22:10:03 +08:00
hashCloser . Close ( )
pipeReader . Close ( )
wg . Done ( )
} ( )
2025-06-14 02:26:23 +08:00
2019-07-25 22:10:03 +08:00
wg . Wait ( )
if getErr != nil {
2022-09-18 18:36:08 +08:00
getErr = fmt . Errorf ( "reading %q: %w" , src , getErr )
2017-03-24 01:49:38 +08:00
}
2019-07-25 22:10:03 +08:00
if closeErr != nil {
2022-09-18 18:36:08 +08:00
closeErr = fmt . Errorf ( "closing %q: %w" , src , closeErr )
2019-07-25 22:10:03 +08:00
}
if renameErr != nil {
2022-09-18 18:36:08 +08:00
renameErr = fmt . Errorf ( "renaming %q: %w" , src , renameErr )
2019-07-25 22:10:03 +08:00
}
if putErr != nil {
2022-09-18 18:36:08 +08:00
putErr = fmt . Errorf ( "storing %q: %w" , src , putErr )
2019-07-25 22:10:03 +08:00
}
multiErr = multierror . Append ( getErr , closeErr , renameErr , putErr )
if multiErr != nil && multiErr . ErrorOrNil ( ) != nil {
if len ( multiErr . Errors ) > 1 {
return multiErr . ErrorOrNil ( )
}
return multiErr . Errors [ 0 ]
2017-03-07 04:39:22 +08:00
}
2019-07-25 22:10:03 +08:00
}
if itemsCopied == 0 {
2021-10-07 21:10:22 +08:00
excludesFile := ""
if options . IgnoreFile != "" {
excludesFile = " using " + options . IgnoreFile
}
2022-07-06 17:14:06 +08:00
return fmt . Errorf ( "no items matching glob %q copied (%d filtered out%s): %w" , localSourceStat . Glob , len ( localSourceStat . Globbed ) , excludesFile , syscall . ENOENT )
2017-03-07 04:39:22 +08:00
}
}
2025-06-14 02:26:23 +08:00
if options . Link {
if ! latestTimestamp . IsZero ( ) {
for _ , dir := range createdDirs {
if err := os . Chtimes ( dir , latestTimestamp , latestTimestamp ) ; err != nil {
logrus . Warnf ( "failed to set timestamp on directory %q: %v" , dir , err )
}
}
}
var created time . Time
if options . Timestamp != nil {
created = * options . Timestamp
} else if ! latestTimestamp . IsZero ( ) {
created = latestTimestamp
} else {
created = time . Unix ( 0 , 0 ) . UTC ( )
}
command := "ADD"
if ! extract {
command = "COPY"
}
contentType , digest := b . ContentDigester . Digest ( )
summary := contentType
if digest != "" {
if summary != "" {
summary = summary + ":"
}
summary = summary + digest . Encoded ( )
logrus . Debugf ( "added content from --link %s" , summary )
}
createdBy := "/bin/sh -c #(nop) " + command + " --link " + summary + " in " + destination + " " + options . BuildMetadata
history := v1 . History {
Created : & created ,
CreatedBy : createdBy ,
Comment : b . HistoryComment ( ) ,
}
linkedLayer := LinkedLayer {
History : history ,
BlobPath : stagingDir ,
}
b . AppendedLinkedLayers = append ( b . AppendedLinkedLayers , linkedLayer )
if err := b . Save ( ) ; err != nil {
return fmt . Errorf ( "saving builder state after queuing linked layer: %w" , err )
}
}
2017-03-07 04:39:22 +08:00
return nil
}
2019-07-25 22:10:03 +08:00
2021-03-23 00:00:02 +08:00
// userForRun returns the user (and group) information which we should use for
// running commands
func ( b * Builder ) userForRun ( mountPoint string , userspec string ) ( specs . User , string , error ) {
2019-07-25 22:10:03 +08:00
if userspec == "" {
userspec = b . User ( )
}
uid , gid , homeDir , err := chrootuser . GetUser ( mountPoint , userspec )
u := specs . User {
UID : uid ,
GID : gid ,
Username : userspec ,
}
if ! strings . Contains ( userspec , ":" ) {
groups , err2 := chrootuser . GetAdditionalGroupsForUser ( mountPoint , uint64 ( u . UID ) )
if err2 != nil {
2022-07-06 17:14:06 +08:00
if ! errors . Is ( err2 , chrootuser . ErrNoSuchUser ) && err == nil {
2019-07-25 22:10:03 +08:00
err = err2
}
} else {
u . AdditionalGids = groups
}
}
return u , homeDir , err
}
2021-03-23 00:00:02 +08:00
// userForCopy returns the user (and group) information which we should use for
// setting ownership of contents being copied. It's just like what
// userForRun() does, except for the case where we're passed a single numeric
// value, where we need to use that value for both the UID and the GID.
func ( b * Builder ) userForCopy ( mountPoint string , userspec string ) ( uint32 , uint32 , error ) {
2021-07-16 01:20:43 +08:00
var (
user , group string
uid , gid uint64
err error
)
split := strings . SplitN ( userspec , ":" , 2 )
user = split [ 0 ]
if len ( split ) > 1 {
group = split [ 1 ]
}
// If userspec did not specify any values for user or group, then fail
if user == "" && group == "" {
2022-07-06 17:14:06 +08:00
return 0 , 0 , fmt . Errorf ( "can't find uid for user %s" , userspec )
2021-07-16 01:20:43 +08:00
}
// If userspec specifies values for user or group, check for numeric values
// and return early. If not, then translate username/groupname
if user != "" {
uid , err = strconv . ParseUint ( user , 10 , 32 )
}
if err == nil {
// default gid to uid
gid = uid
if group != "" {
gid , err = strconv . ParseUint ( group , 10 , 32 )
}
2021-03-23 00:00:02 +08:00
}
2021-07-16 01:20:43 +08:00
// If err != nil, then user or group not numeric, check filesystem
if err == nil {
return uint32 ( uid ) , uint32 ( gid ) , nil
}
owner , _ , err := b . userForRun ( mountPoint , userspec )
2021-03-23 00:00:02 +08:00
if err != nil {
return 0xffffffff , 0xffffffff , err
}
2021-07-16 01:20:43 +08:00
return owner . UID , owner . GID , nil
2021-03-23 00:00:02 +08:00
}
2022-03-23 18:37:57 +08:00
2024-06-08 03:03:18 +08:00
// EnsureContainerPathAs creates the specified directory if it doesn't exist,
// setting a newly-created directory's owner to USER and its permissions to MODE.
2022-03-23 18:37:57 +08:00
func ( b * Builder ) EnsureContainerPathAs ( path , user string , mode * os . FileMode ) error {
mountPoint , err := b . Mount ( b . MountLabel )
if err != nil {
return err
}
defer func ( ) {
if err2 := b . Unmount ( ) ; err2 != nil {
logrus . Errorf ( "error unmounting container: %v" , err2 )
}
} ( )
uid , gid := uint32 ( 0 ) , uint32 ( 0 )
if user != "" {
if uidForCopy , gidForCopy , err := b . userForCopy ( mountPoint , user ) ; err == nil {
uid = uidForCopy
gid = gidForCopy
}
}
destUIDMap , destGIDMap := convertRuntimeIDMaps ( b . IDMappingOptions . UIDMap , b . IDMappingOptions . GIDMap )
idPair := & idtools . IDPair { UID : int ( uid ) , GID : int ( gid ) }
opts := copier . MkdirOptions {
ChmodNew : mode ,
ChownNew : idPair ,
UIDMap : destUIDMap ,
GIDMap : destGIDMap ,
}
return copier . Mkdir ( mountPoint , filepath . Join ( mountPoint , path ) , opts )
}