Use pipes for copying

Use the copier package to rework how we handle ADD and COPY.

When evaluating cache for content that's being copied/added in, switch
from (digest the data, check for a cache entry, then maybe copy the data
and create the new layer) to (copy the data and create the new layer,
digesting as we go, check for a cache entry, either commit or discard
the new layer).

Use the copier package for ADD, COPY, and for ensuring that a specified
directory exists in the working container's rootfs.
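
In other words, content is digested while it streams through the copy pipeline, so checking the cache costs no extra pass over the data. A minimal sketch of that pipe-and-tee pattern (illustrative only, not buildah's actual code; the produce/consume callbacks stand in for copier.Get and copier.Put):

package main

import (
	"io"

	digest "github.com/opencontainers/go-digest"
)

// copyAndDigest streams content from produce into consume through a pipe,
// computing the content digest during the same single pass.
func copyAndDigest(produce func(io.Writer) error, consume func(io.Reader) error) (digest.Digest, error) {
	digester := digest.Canonical.Digester()
	pipeReader, pipeWriter := io.Pipe()
	errs := make(chan error, 1)
	go func() {
		errs <- produce(pipeWriter) // e.g. writing a tar stream
		pipeWriter.Close()
	}()
	// Everything consume reads is also fed to the digester.
	consumeErr := consume(io.TeeReader(pipeReader, digester.Hash()))
	pipeReader.Close() // unblocks the producer if the consumer stopped early
	produceErr := <-errs
	if produceErr != nil {
		return "", produceErr
	}
	if consumeErr != nil {
		return "", consumeErr
	}
	return digester.Digest(), nil
}

If the resulting digest matches a cache entry, the freshly created layer can be discarded; otherwise it is committed.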

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Nalin Dahyabhai 2019-07-25 10:10:03 -04:00
parent 6bbc481436
commit 3835460c3b
23 changed files with 589 additions and 1162 deletions

add.go (645 changed lines)

@@ -1,21 +1,25 @@
 package buildah

 import (
+	"archive/tar"
+	"fmt"
 	"io"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
 	"path"
 	"path/filepath"
 	"strings"
+	"sync"
 	"syscall"
 	"time"

+	"github.com/containers/buildah/copier"
 	"github.com/containers/buildah/pkg/chrootuser"
-	"github.com/containers/buildah/util"
-	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/fileutils"
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/hashicorp/go-multierror"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -33,9 +37,10 @@ type AddAndCopyOptions struct {
 	// If the sources include directory trees, Hasher will be passed
 	// tar-format archives of the directory trees.
 	Hasher io.Writer
-	// Excludes is the contents of the .dockerignore file
+	// Excludes is the contents of the .dockerignore file.
 	Excludes []string
-	// ContextDir is the base directory for Excludes for content being copied
+	// ContextDir is the base directory for content being copied and
+	// Excludes patterns.
 	ContextDir string
 	// ID mapping options to use when contents to be copied are part of
 	// another container, and need ownerships to be mapped from the host to
@@ -44,74 +49,93 @@ type AddAndCopyOptions struct {
 	// DryRun indicates that the content should be digested, but not actually
 	// copied into the container.
 	DryRun bool
+	// Clear the setuid bit on items being copied. Has no effect on
+	// archives being extracted, where the bit is always preserved.
+	StripSetuidBit bool
+	// Clear the setgid bit on items being copied. Has no effect on
+	// archives being extracted, where the bit is always preserved.
+	StripSetgidBit bool
+	// Clear the sticky bit on items being copied. Has no effect on
+	// archives being extracted, where the bit is always preserved.
+	StripStickyBit bool
 }

-// addURL copies the contents of the source URL to the destination. This is
-// its own function so that deferred closes happen after we're done pulling
-// down each item of potentially many.
-func (b *Builder) addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer, dryRun bool) error {
-	resp, err := http.Get(srcurl)
-	if err != nil {
-		return errors.Wrapf(err, "error getting %q", srcurl)
-	}
-	defer resp.Body.Close()
-	thisHasher := hasher
-	if thisHasher != nil && b.ContentDigester.Hash() != nil {
-		thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
-	}
-	if thisHasher == nil {
-		thisHasher = b.ContentDigester.Hash()
-	}
-	thisWriter := thisHasher
-	if !dryRun {
-		logrus.Debugf("saving %q to %q", srcurl, destination)
-		f, err := os.Create(destination)
-		if err != nil {
-			return errors.Wrapf(err, "error creating %q", destination)
-		}
-		defer f.Close()
-		if err = f.Chown(owner.UID, owner.GID); err != nil {
-			return errors.Wrapf(err, "error setting owner of %q to %d:%d", destination, owner.UID, owner.GID)
-		}
-		if last := resp.Header.Get("Last-Modified"); last != "" {
-			if mtime, err2 := time.Parse(time.RFC1123, last); err2 != nil {
-				logrus.Debugf("error parsing Last-Modified time %q: %v", last, err2)
-			} else {
-				defer func() {
-					if err3 := os.Chtimes(destination, time.Now(), mtime); err3 != nil {
-						logrus.Debugf("error setting mtime on %q to Last-Modified time %q: %v", destination, last, err3)
-					}
-				}()
-			}
-		}
-		defer func() {
-			if err2 := f.Chmod(0600); err2 != nil {
-				logrus.Debugf("error setting permissions on %q: %v", destination, err2)
-			}
-		}()
-		thisWriter = io.MultiWriter(f, thisWriter)
-	}
-	n, err := io.Copy(thisWriter, resp.Body)
-	if err != nil {
-		return errors.Wrapf(err, "error reading contents for %q from %q", destination, srcurl)
-	}
-	if resp.ContentLength >= 0 && n != resp.ContentLength {
-		return errors.Errorf("error reading contents for %q from %q: wrong length (%d != %d)", destination, srcurl, n, resp.ContentLength)
-	}
-	return nil
-}
+// sourceIsRemote returns true if "source" is a remote location.
+func sourceIsRemote(source string) bool {
+	return strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://")
+}
+
+// getURL writes a tar archive containing the named content
+func getURL(src, mountpoint, renameTarget string, writer io.Writer) error {
+	url, err := url.Parse(src)
+	if err != nil {
+		return errors.Wrapf(err, "error parsing URL %q", url)
+	}
+	response, err := http.Get(src)
+	if err != nil {
+		return errors.Wrapf(err, "error parsing URL %q", url)
+	}
+	defer response.Body.Close()
+	// Figure out what to name the new content.
+	name := renameTarget
+	if name == "" {
+		name = path.Base(url.Path)
+	}
+	// If there's a date on the content, use it. If not, use the Unix epoch
+	// for compatibility.
+	date := time.Unix(0, 0).UTC()
+	lastModified := response.Header.Get("Last-Modified")
+	if lastModified != "" {
+		d, err := time.Parse(time.RFC1123, lastModified)
+		if err != nil {
+			return errors.Wrapf(err, "error parsing last-modified time %q", lastModified)
+		}
+		date = d
+	}
+	// Figure out the size of the content.
+	size := response.ContentLength
+	responseBody := response.Body
+	if size < 0 {
+		// Create a temporary file and copy the content to it, so that
+		// we can figure out how much content there is.
+		f, err := ioutil.TempFile(mountpoint, "download")
+		if err != nil {
+			return errors.Wrapf(err, "error creating temporary file to hold %q", src)
+		}
+		defer os.Remove(f.Name())
+		defer f.Close()
+		size, err = io.Copy(f, response.Body)
+		if err != nil {
+			return errors.Wrapf(err, "error writing %q to temporary file %q", src, f.Name())
+		}
+		_, err = f.Seek(0, io.SeekStart)
+		if err != nil {
+			return errors.Wrapf(err, "error setting up to read %q from temporary file %q", src, f.Name())
+		}
+		responseBody = f
+	}
+	// Write the output archive. Set permissions for compatibility.
+	tw := tar.NewWriter(writer)
+	defer tw.Close()
+	hdr := tar.Header{
+		Typeflag: tar.TypeReg,
+		Name:     name,
+		Size:     size,
+		Mode:     0600,
+		ModTime:  date,
+	}
+	err = tw.WriteHeader(&hdr)
+	if err != nil {
+		return errors.Wrapf(err, "error writing header")
+	}
+	_, err = io.Copy(tw, responseBody)
+	return errors.Wrapf(err, "error writing content from %q to tar stream", src)
+}
 // Add copies the contents of the specified sources into the container's root
 // filesystem, optionally extracting contents of local files that look like
 // non-empty archives.
-func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
-	excludes, err := dockerIgnoreMatcher(options.Excludes, options.ContextDir)
-	if err != nil {
-		return err
-	}
+func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, sources ...string) error {
 	mountPoint, err := b.Mount(b.MountLabel)
 	if err != nil {
 		return err
@@ -121,64 +145,306 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
 			logrus.Errorf("error unmounting container: %v", err2)
 		}
 	}()
contextDir := options.ContextDir
if contextDir == "" {
contextDir = string(os.PathSeparator)
}
// Figure out what sorts of sources we have.
var localSources, remoteSources []string
for _, src := range sources {
if sourceIsRemote(src) {
remoteSources = append(remoteSources, src)
continue
}
localSources = append(localSources, src)
}
// Check how many items our local source specs matched. Each spec
// should have matched at least one item, otherwise we consider it an
// error.
var localSourceStats []*copier.StatsForGlob
if len(localSources) > 0 {
statOptions := copier.StatOptions{
CheckForArchives: extract,
}
localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
if err != nil {
return errors.Wrapf(err, "error checking on sources %v under %q", localSources, contextDir)
}
}
numLocalSourceItems := 0
for _, localSourceStat := range localSourceStats {
if localSourceStat.Error != "" {
errorText := localSourceStat.Error
rel, err := filepath.Rel(contextDir, localSourceStat.Glob)
if err != nil {
errorText = fmt.Sprintf("%v; %s", err, errorText)
}
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
}
return errors.Errorf("error checking on source %v under %q: %v", localSourceStat.Glob, contextDir, errorText)
}
if len(localSourceStat.Globbed) == 0 {
return errors.Wrapf(syscall.ENOENT, "error checking on source %v under %q: no glob matches", localSourceStat.Glob, contextDir)
}
numLocalSourceItems += len(localSourceStat.Globbed)
}
if numLocalSourceItems+len(remoteSources) == 0 {
return errors.Wrapf(syscall.ENOENT, "no sources %v found", sources)
}
 	// Find out which user (and group) the destination should belong to.
-	user, _, err := b.user(mountPoint, options.Chown)
-	if err != nil {
-		return err
-	}
-	containerOwner := idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
-	hostUID, hostGID, err := util.GetHostIDs(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap, user.UID, user.GID)
-	if err != nil {
-		return err
-	}
-	hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)}
-	dest := mountPoint
-	if !options.DryRun {
-		// Resolve the destination if it was specified as a relative path.
-		if destination != "" && filepath.IsAbs(destination) {
-			dir := filepath.Dir(destination)
-			if dir != "." && dir != "/" {
-				if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, dir), 0755, hostOwner); err != nil {
-					return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, dir))
-				}
-			}
-			dest = filepath.Join(dest, destination)
-		} else {
-			if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil {
-				return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, b.WorkDir()))
-			}
-			dest = filepath.Join(dest, b.WorkDir(), destination)
-		}
-		// If the destination was explicitly marked as a directory by ending it
-		// with a '/', create it so that we can be sure that it's a directory,
-		// and any files we're copying will be placed in the directory.
-		if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator {
-			if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
-				return errors.Wrapf(err, "error creating directory %q", dest)
-			}
-		}
-		// Make sure the destination's parent directory is usable.
-		if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() {
-			return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest))
-		}
-	}
-	// Now look at the destination itself.
-	destfi, err := os.Stat(dest)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return errors.Wrapf(err, "couldn't determine what %q is", dest)
-		}
-		destfi = nil
-	}
-	if len(source) > 1 && (destfi == nil || !destfi.IsDir()) {
-		return errors.Errorf("destination %q is not a directory", dest)
-	}
-	copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
-	copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
-	untarPath := b.untarPath(nil, options.Hasher, options.DryRun)
-	err = b.addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...)
-	if err != nil {
-		return err
-	}
+	var chownDirs, chownFiles *idtools.IDPair
+	var chmodDirs, chmodFiles *os.FileMode
+	var user specs.User
+	if options.Chown != "" {
+		user, _, err = b.user(mountPoint, options.Chown)
+		if err != nil {
+			return errors.Wrapf(err, "error looking up UID/GID for %q", options.Chown)
+		}
+		chownDirs = &idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+		chownFiles = &idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+	}
+
+	// If we have a single source archive to extract, or more than one
+	// source item, or the destination has a path separator at the end of
+	// it, and it's not a remote URL, the destination needs to be a
+	// directory.
+	if destination == "" || !filepath.IsAbs(destination) {
+		tmpDestination := filepath.Join(string(os.PathSeparator)+b.WorkDir(), destination)
+		if destination == "" || strings.HasSuffix(destination, string(os.PathSeparator)) {
+			destination = tmpDestination + string(os.PathSeparator)
+		} else {
+			destination = tmpDestination
+		}
+	}
+	destMustBeDirectory := (len(sources) > 1) || strings.HasSuffix(destination, string(os.PathSeparator))
+	destCanBeFile := false
+	if len(sources) == 1 {
+		if len(remoteSources) == 1 {
+			destCanBeFile = sourceIsRemote(sources[0])
+		}
+		if len(localSources) == 1 {
+			item := localSourceStats[0].Results[localSourceStats[0].Globbed[0]]
+			if item.IsDir || (item.IsArchive && extract) {
+				destMustBeDirectory = true
+			}
+			if item.IsRegular {
+				destCanBeFile = true
+			}
+		}
+	}
+
+	// We care if the destination either doesn't exist, or exists and is a
+	// file. If the source can be a single file, for those cases we treat
+	// the destination as a file rather than as a directory tree.
+	renameTarget := ""
+	extractDirectory := filepath.Join(mountPoint, destination)
+	statOptions := copier.StatOptions{
+		CheckForArchives: extract,
+	}
+	destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
+	if err != nil {
+		return errors.Wrapf(err, "error checking on destination %v", extractDirectory)
+	}
+	if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
+		// destination doesn't exist - extract to parent and rename the incoming file to the destination's name
+		renameTarget = filepath.Base(extractDirectory)
+		extractDirectory = filepath.Dir(extractDirectory)
+	}
+	if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
+		if destMustBeDirectory {
+			return errors.Errorf("destination %v already exists but is not a directory", destination)
+		}
+		// destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
+		renameTarget = filepath.Base(extractDirectory)
+		extractDirectory = filepath.Dir(extractDirectory)
+	}
+
+	pm, err := fileutils.NewPatternMatcher(options.Excludes)
+	if err != nil {
+		return errors.Wrapf(err, "error processing excludes list %v", options.Excludes)
+	}
// Copy each source in turn.
var srcUIDMap, srcGIDMap []idtools.IDMap
if options.IDMappingOptions != nil {
srcUIDMap, srcGIDMap = convertRuntimeIDMaps(options.IDMappingOptions.UIDMap, options.IDMappingOptions.GIDMap)
}
destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
for _, src := range sources {
var multiErr *multierror.Error
var getErr, closeErr, renameErr, putErr error
var wg sync.WaitGroup
if sourceIsRemote(src) {
pipeReader, pipeWriter := io.Pipe()
wg.Add(1)
go func() {
getErr = getURL(src, mountPoint, renameTarget, pipeWriter)
pipeWriter.Close()
wg.Done()
}()
wg.Add(1)
go func() {
b.ContentDigester.Start("")
hashCloser := b.ContentDigester.Hash()
hasher := io.Writer(hashCloser)
if options.Hasher != nil {
hasher = io.MultiWriter(hasher, options.Hasher)
}
if options.DryRun {
_, putErr = io.Copy(hasher, pipeReader)
} else {
putOptions := copier.PutOptions{
UIDMap: destUIDMap,
GIDMap: destGIDMap,
ChownDirs: chownDirs,
ChmodDirs: chmodDirs,
ChownFiles: chownFiles,
ChmodFiles: chmodFiles,
}
putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
}
hashCloser.Close()
pipeReader.Close()
wg.Done()
}()
wg.Wait()
if getErr != nil {
getErr = errors.Wrapf(getErr, "error reading %q", src)
}
if putErr != nil {
putErr = errors.Wrapf(putErr, "error storing %q", src)
}
multiErr = multierror.Append(getErr, putErr)
if multiErr != nil && multiErr.ErrorOrNil() != nil {
if len(multiErr.Errors) > 1 {
return multiErr.ErrorOrNil()
}
return multiErr.Errors[0]
}
continue
}
// Dig out the result of running glob+stat on this source spec.
var localSourceStat *copier.StatsForGlob
for _, st := range localSourceStats {
if st.Glob == src {
localSourceStat = st
break
}
}
if localSourceStat == nil {
return errors.Errorf("internal error: should have statted %s, but we didn't?", src)
}
// Iterate through every item that matched the glob.
itemsCopied := 0
for _, glob := range localSourceStat.Globbed {
rel, err := filepath.Rel(contextDir, glob)
if err != nil {
return errors.Wrapf(err, "error computing path of %q", glob)
}
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return errors.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
}
// Check for dockerignore-style exclusion of this item.
if rel != "." {
matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
if err != nil {
return errors.Wrapf(err, "error checking if %q(%q) is excluded", glob, rel)
}
if matches {
continue
}
}
st := localSourceStat.Results[glob]
pipeReader, pipeWriter := io.Pipe()
wg.Add(1)
go func() {
renamedItems := 0
writer := io.WriteCloser(pipeWriter)
if renameTarget != "" {
writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
hdr.Name = renameTarget
renamedItems++
return false, false, nil
})
}
getOptions := copier.GetOptions{
UIDMap: srcUIDMap,
GIDMap: srcGIDMap,
Excludes: options.Excludes,
ExpandArchives: extract,
StripSetuidBit: options.StripSetuidBit,
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
}
getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
closeErr = writer.Close()
if renameTarget != "" && renamedItems > 1 {
renameErr = errors.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
}
wg.Done()
}()
wg.Add(1)
go func() {
if st.IsDir {
b.ContentDigester.Start("dir")
} else {
b.ContentDigester.Start("file")
}
hashCloser := b.ContentDigester.Hash()
hasher := io.Writer(hashCloser)
if options.Hasher != nil {
hasher = io.MultiWriter(hasher, options.Hasher)
}
if options.DryRun {
_, putErr = io.Copy(hasher, pipeReader)
} else {
putOptions := copier.PutOptions{
UIDMap: destUIDMap,
GIDMap: destGIDMap,
ChownDirs: chownDirs,
ChmodDirs: chmodDirs,
ChownFiles: chownFiles,
ChmodFiles: chmodFiles,
}
putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
}
hashCloser.Close()
pipeReader.Close()
wg.Done()
}()
wg.Wait()
if getErr != nil {
getErr = errors.Wrapf(getErr, "error reading %q", src)
}
if closeErr != nil {
closeErr = errors.Wrapf(closeErr, "error closing %q", src)
}
if renameErr != nil {
renameErr = errors.Wrapf(renameErr, "error renaming %q", src)
}
if putErr != nil {
putErr = errors.Wrapf(putErr, "error storing %q", src)
}
multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
if multiErr != nil && multiErr.ErrorOrNil() != nil {
if len(multiErr.Errors) > 1 {
return multiErr.ErrorOrNil()
}
return multiErr.Errors[0]
}
itemsCopied++
}
if itemsCopied == 0 {
return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered)", localSourceStat.Glob, len(localSourceStat.Globbed))
}
	}
	return nil
}
@@ -208,180 +474,3 @@ func (b *Builder) user(mountPoint string, userspec string) (specs.User, string,
 	}
 	return u, homeDir, err
 }
// dockerIgnoreMatcher returns a matcher based on the contents of the .dockerignore file under contextDir
func dockerIgnoreMatcher(lines []string, contextDir string) (*fileutils.PatternMatcher, error) {
// if there's no context dir, there's no .dockerignore file to consult
if contextDir == "" {
return nil, nil
}
// If there's no .dockerignore file, then we don't have to add a
// pattern to tell copy logic to ignore it later.
var patterns []string
if _, err := os.Stat(filepath.Join(contextDir, ".dockerignore")); err == nil || !os.IsNotExist(err) {
patterns = []string{".dockerignore"}
}
for _, ignoreSpec := range lines {
ignoreSpec = strings.TrimSpace(ignoreSpec)
// ignore comments passed back from .dockerignore
if ignoreSpec == "" || ignoreSpec[0] == '#' {
continue
}
// if the spec starts with '!' it means the pattern
// should be included. make a note so that we can move
// it to the front of the updated pattern, and insert
// the context dir's path in between
includeFlag := ""
if strings.HasPrefix(ignoreSpec, "!") {
includeFlag = "!"
ignoreSpec = ignoreSpec[1:]
}
if ignoreSpec == "" {
continue
}
patterns = append(patterns, includeFlag+filepath.Join(contextDir, ignoreSpec))
}
// if there are no patterns, save time by not constructing the object
if len(patterns) == 0 {
return nil, nil
}
// return a matcher object
matcher, err := fileutils.NewPatternMatcher(patterns)
if err != nil {
return nil, errors.Wrapf(err, "error creating file matcher using patterns %v", patterns)
}
return matcher, nil
}
func (b *Builder) addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
for n, src := range source {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
b.ContentDigester.Start("")
// We assume that source is a file, and we're copying
// it to the destination. If the destination is
// already a directory, create a file inside of it.
// Otherwise, the destination is the file to which
// we'll save the contents.
url, err := url.Parse(src)
if err != nil {
return errors.Wrapf(err, "error parsing URL %q", src)
}
d := dest
if destfi != nil && destfi.IsDir() {
d = filepath.Join(dest, path.Base(url.Path))
}
if err = b.addURL(d, src, hostOwner, options.Hasher, options.DryRun); err != nil {
return err
}
continue
}
glob, err := filepath.Glob(src)
if err != nil {
return errors.Wrapf(err, "invalid glob %q", src)
}
if len(glob) == 0 {
return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
}
for _, gsrc := range glob {
esrc, err := filepath.EvalSymlinks(gsrc)
if err != nil {
return errors.Wrapf(err, "error evaluating symlinks %q", gsrc)
}
srcfi, err := os.Stat(esrc)
if err != nil {
return errors.Wrapf(err, "error reading %q", esrc)
}
if srcfi.IsDir() {
b.ContentDigester.Start("dir")
// The source is a directory, so copy the contents of
// the source directory into the target directory. Try
// to create it first, so that if there's a problem,
// we'll discover why that won't work.
if !options.DryRun {
if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
return errors.Wrapf(err, "error creating directory %q", dest)
}
}
logrus.Debugf("copying[%d] %q to %q", n, esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
// Copy the whole directory because we do not exclude anything
if excludes == nil {
if err = copyWithTar(esrc, dest); err != nil {
return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
}
continue
}
err := filepath.Walk(esrc, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
res, err := excludes.MatchesResult(path)
if err != nil {
return errors.Wrapf(err, "error checking if %s is an excluded path", path)
}
// The latest match result has the highest priority,
// which means that we only skip the filepath if
// the last result matched.
if res.IsMatched() {
return nil
}
// combine the source's basename with the dest directory
fpath, err := filepath.Rel(esrc, path)
if err != nil {
return errors.Wrapf(err, "error converting %s to a path relative to %s", path, esrc)
}
if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
return errors.Wrapf(err, "error copying %q to %q", path, dest)
}
return nil
})
if err != nil {
return err
}
continue
}
// This source is a file
// Check if the path matches the .dockerignore
if excludes != nil {
res, err := excludes.MatchesResult(esrc)
if err != nil {
return errors.Wrapf(err, "error checking if %s is an excluded path", esrc)
}
// Skip the file if the pattern matches
if res.IsMatched() {
continue
}
}
b.ContentDigester.Start("file")
if !extract || !archive.IsArchivePath(esrc) {
// This source is a file, and either it's not an
// archive, or we don't care whether or not it's an
// archive.
d := dest
if destfi != nil && destfi.IsDir() {
d = filepath.Join(dest, filepath.Base(gsrc))
}
// Copy the file, preserving attributes.
logrus.Debugf("copying[%d] %q to %q", n, esrc, d)
if err = copyFileWithTar(esrc, d); err != nil {
return errors.Wrapf(err, "error copying %q to %q", esrc, d)
}
continue
}
// We're extracting an archive into the destination directory.
logrus.Debugf("extracting contents[%d] of %q into %q", n, esrc, dest)
if err = untarPath(esrc, dest); err != nil {
return errors.Wrapf(err, "error extracting %q into %q", esrc, dest)
}
}
}
return nil
}
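
For URL sources, getURL above wraps the downloaded bytes in a single-entry tar stream so that the same tar-oriented extraction path can place them in the rootfs. A standalone sketch of that idea (names here are illustrative, not buildah's API):

package main

import (
	"archive/tar"
	"bytes"
	"io"
	"time"
)

// singleEntryTar wraps one blob of content in a tar stream containing a
// single regular-file entry, for consumption by any tar extractor.
func singleEntryTar(name string, content []byte, modTime time.Time, w io.Writer) error {
	tw := tar.NewWriter(w)
	hdr := tar.Header{
		Typeflag: tar.TypeReg,
		Name:     name,
		Size:     int64(len(content)),
		Mode:     0600,
		ModTime:  modTime,
	}
	if err := tw.WriteHeader(&hdr); err != nil {
		return err
	}
	if _, err := io.Copy(tw, bytes.NewReader(content)); err != nil {
		return err
	}
	return tw.Close()
}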


@@ -5,7 +5,6 @@ import (
 	"github.com/containers/buildah"
 	buildahcli "github.com/containers/buildah/pkg/cli"
-	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -98,20 +97,25 @@ func addAndCopyCmd(c *cobra.Command, args []string, verb string, extractLocalArc
 		return errors.Wrapf(err, "error reading build container %q", name)
 	}
-	digester := digest.Canonical.Digester()
+	builder.ContentDigester.Restart()
 	options := buildah.AddAndCopyOptions{
 		Chown:  iopts.chown,
-		Hasher: digester.Hash(),
 	}
-	if err := builder.Add(dest, extractLocalArchives, options, args...); err != nil {
+	err = builder.Add(dest, extractLocalArchives, options, args...)
+	if err != nil {
 		return errors.Wrapf(err, "error adding content to container %q", builder.Container)
 	}
+	contentType, digest := builder.ContentDigester.Digest()
 	if !iopts.quiet {
-		fmt.Printf("%s\n", digester.Digest().Hex())
+		fmt.Printf("%s\n", digest.Hex())
 	}
+	if contentType != "" {
+		contentType = contentType + ":"
+	}
-	conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) %s file:%s", verb, digester.Digest().Hex())
+	conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) %s %s%s", verb, contentType, digest.Hex())
 	return builder.Save()
 }


@@ -452,7 +452,7 @@ func copierWithoutSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req req
 }

 func closeIfNotNilYet(f **os.File, what string) {
-	if f != nil {
+	if f != nil && *f != nil {
 		err := (*f).Close()
 		*f = nil
 		if err != nil {


@@ -82,6 +82,10 @@ func (t *tarFilterer) Close() error {
 // newTarFilterer passes one or more tar archives through to an io.WriteCloser
 // as a single archive, potentially calling filter to modify headers and
 // contents as it goes.
+//
+// Note: if "filter" indicates that a given item should be skipped, there is no
+// guarantee that there will not be a subsequent item of type TypeLink, which
+// is a hard link, which points to the skipped item as the link target.
 func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader)) io.WriteCloser {
 	pipeReader, pipeWriter := io.Pipe()
 	tarWriter := tar.NewWriter(writeCloser)
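
The note added above matters because skipping an entry can strand a later hard-link entry. A minimal illustration of the hazard, using only the standard library (not buildah's filterer API):

package main

import (
	"archive/tar"
	"log"
	"os"
)

func main() {
	tw := tar.NewWriter(os.Stdout)
	defer tw.Close()
	data := []byte("contents")
	if err := tw.WriteHeader(&tar.Header{Typeflag: tar.TypeReg, Name: "original", Size: int64(len(data)), Mode: 0600}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(data); err != nil {
		log.Fatal(err)
	}
	// A later hard-link entry referring to "original": if a filter had
	// skipped "original", extracting this entry would fail because the
	// link target never appeared in the stream.
	if err := tw.WriteHeader(&tar.Header{Typeflag: tar.TypeLink, Name: "alias", Linkname: "original", Mode: 0600}); err != nil {
		log.Fatal(err)
	}
}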

go.mod (1 changed line)

@@ -8,7 +8,6 @@ require (
 	github.com/containers/image/v5 v5.5.1
 	github.com/containers/ocicrypt v1.0.3
 	github.com/containers/storage v1.23.0
-	github.com/cyphar/filepath-securejoin v0.2.2
 	github.com/docker/distribution v2.7.1+incompatible
 	github.com/docker/go-units v0.4.0
 	github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316


@@ -13,6 +13,7 @@ import (
 	"strings"
 	"time"

+	"github.com/containers/buildah/copier"
 	"github.com/containers/buildah/docker"
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/image"
@@ -21,6 +22,7 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go"
@@ -58,7 +60,7 @@ type containerImageRef struct {
 	exporting        bool
 	squash           bool
 	emptyLayer       bool
-	tarPath          func(path string) (io.ReadCloser, error)
+	idMappingOptions *IDMappingOptions
 	parent           string
 	blobDirectory    string
 	preEmptyLayers   []v1.History
@@ -142,16 +144,25 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om
 // Extract the container's whole filesystem as if it were a single layer.
 func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
+	var uidMap, gidMap []idtools.IDMap
 	mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error mounting container %q", i.containerID)
 	}
-	rc, err := i.tarPath(mountPoint)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error extracting rootfs from container %q", i.containerID)
-	}
-	return ioutils.NewReadCloserWrapper(rc, func() error {
-		if err = rc.Close(); err != nil {
+	pipeReader, pipeWriter := io.Pipe()
+	go func() {
+		if i.idMappingOptions != nil {
+			uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
+		}
+		copierOptions := copier.GetOptions{
+			UIDMap: uidMap,
+			GIDMap: gidMap,
+		}
+		err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
+		pipeWriter.Close()
+	}()
+	return ioutils.NewReadCloserWrapper(pipeReader, func() error {
+		if err = pipeReader.Close(); err != nil {
 			err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID)
 		}
 		if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
@@ -414,7 +425,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 	} else {
 		size = counter.Count
 	}
-	logrus.Debugf("%s size is %d bytes", what, size)
+	logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String())
 	// Rename the layer so that we can more easily find it by digest later.
 	finalBlobName := filepath.Join(path, destHasher.Digest().String())
 	if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
@@ -735,7 +746,7 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
 		exporting:        exporting,
 		squash:           options.Squash,
 		emptyLayer:       options.EmptyLayer && !options.Squash,
-		tarPath:          b.tarPath(&b.IDMappingOptions),
+		idMappingOptions: &b.IDMappingOptions,
 		parent:           parent,
 		blobDirectory:    options.BlobDirectory,
 		preEmptyLayers:   b.PrependedEmptyLayers,
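
The rewritten extractRootfs produces the layer tar by running copier.Get in a goroutine that feeds an io.Pipe. A generic sketch of that producer/consumer idiom follows; it uses CloseWithError so a producer failure surfaces to the reader, which is one common way to wire it up, not a transcription of the code above:

package main

import (
	"fmt"
	"io"
	"strings"
)

// streamFrom runs produce in a goroutine and returns a reader for its
// output; a produce error surfaces as the reader's error.
func streamFrom(produce func(io.Writer) error) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		// CloseWithError(nil) behaves like a plain Close().
		pw.CloseWithError(produce(pw))
	}()
	return pr
}

func main() {
	rc := streamFrom(func(w io.Writer) error {
		_, err := io.Copy(w, strings.NewReader("rootfs bytes"))
		return err
	})
	defer rc.Close()
	data, err := io.ReadAll(rc)
	fmt.Println(string(data), err)
}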


@@ -12,8 +12,8 @@ import (
 	"strings"
 	"time"

 	"github.com/containers/buildah"
+	"github.com/containers/buildah/copier"
 	buildahdocker "github.com/containers/buildah/docker"
-	"github.com/containers/buildah/pkg/chrootuser"
 	"github.com/containers/buildah/util"
 	cp "github.com/containers/image/v5/copy"
 	"github.com/containers/image/v5/docker/reference"
@@ -23,7 +23,6 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/archive"
-	securejoin "github.com/cyphar/filepath-securejoin"
 	docker "github.com/fsouza/go-dockerclient"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/openshift/imagebuilder"
@@ -55,7 +54,6 @@ type StageExecutor struct {
 	volumeCache     map[string]string
 	volumeCacheInfo map[string]os.FileInfo
 	mountPoint      string
-	copyFrom        string // Used to keep track of the --from flag from COPY and ADD
 	output          string
 	containerIDs    []string
 	stage           *imagebuilder.Stage
@@ -258,166 +256,11 @@ func (s *StageExecutor) volumeCacheRestore() error {
 	return nil
 }
// digestSpecifiedContent digests any content that this next instruction would add to
// the image, returning the digester if there is any, or nil otherwise. We
// don't care about the details of where in the filesystem the content actually
// goes, because we're not actually going to add it here, so this is less
// involved than Copy().
func (s *StageExecutor) digestSpecifiedContent(ctx context.Context, node *parser.Node, argValues []string, envValues []string) (string, error) {
// No instruction: done.
if node == nil {
return "", nil
}
// Not adding content: done.
switch strings.ToUpper(node.Value) {
default:
return "", nil
case "ADD", "COPY":
}
// Pull out everything except the first node (the instruction) and the
// last node (the destination).
var srcs []string
destination := node
for destination.Next != nil {
destination = destination.Next
if destination.Next != nil {
srcs = append(srcs, destination.Value)
}
}
var sources []string
var idMappingOptions *buildah.IDMappingOptions
contextDir := s.executor.contextDir
for _, flag := range node.Flags {
if strings.HasPrefix(flag, "--from=") {
// Flag says to read the content from another
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
from := strings.TrimPrefix(flag, "--from=")
// If from has an argument within it, resolve it to its
// value. Otherwise just return the value found.
var fromErr error
from, fromErr = imagebuilder.ProcessWord(from, s.stage.Builder.Arguments())
if fromErr != nil {
return "", errors.Wrapf(fromErr, "unable to resolve argument %q", from)
}
if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
return "", err
}
if other, ok := s.executor.stages[from]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[from]; ok {
contextDir = builder.MountPoint
idMappingOptions = &builder.IDMappingOptions
} else {
return "", errors.Errorf("the stage %q has not been built", from)
}
}
}
varValues := append(argValues, envValues...)
for _, src := range srcs {
// If src has an argument within it, resolve it to its
// value. Otherwise just return the value found.
name, err := imagebuilder.ProcessWord(src, varValues)
if err != nil {
return "", errors.Wrapf(err, "unable to resolve source %q", src)
}
src = name
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// Source is a URL. TODO: cache this content
// somewhere, so that we can avoid pulling it down
// again if we end up needing to drop it into the
// filesystem.
sources = append(sources, src)
} else {
// Source is not a URL, so it's a location relative to
// the all-content-comes-from-below-this-directory
// directory. Also raise an error if the src escapes
// the context directory.
contextSrc, err := securejoin.SecureJoin(contextDir, src)
if err == nil && strings.HasPrefix(src, "../") {
err = errors.New("escaping context directory error")
}
if err != nil {
return "", errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
}
sources = append(sources, contextSrc)
}
}
// If the all-content-comes-from-below-this-directory is the build
// context, read its .dockerignore.
var excludes []string
if contextDir == s.executor.contextDir {
var err error
if excludes, err = imagebuilder.ParseDockerignore(contextDir); err != nil {
return "", errors.Wrapf(err, "error parsing .dockerignore in %s", contextDir)
}
}
// Restart the digester and have it do a dry-run copy to compute the
// digest information.
options := buildah.AddAndCopyOptions{
Excludes: excludes,
ContextDir: contextDir,
IDMappingOptions: idMappingOptions,
DryRun: true,
}
s.builder.ContentDigester.Restart()
download := strings.ToUpper(node.Value) == "ADD"
// If destination.Value has an argument within it, resolve it to its
// value. Otherwise just return the value found.
destValue, destErr := imagebuilder.ProcessWord(destination.Value, varValues)
if destErr != nil {
return "", errors.Wrapf(destErr, "unable to resolve destination %q", destination.Value)
}
err := s.builder.Add(destValue, download, options, sources...)
if err != nil {
return "", errors.Wrapf(err, "error dry-running %q", node.Original)
}
// Return the formatted version of the digester's result.
contentDigest := ""
prefix, digest := s.builder.ContentDigester.Digest()
if prefix != "" {
prefix += ":"
}
if digest.Validate() == nil {
contentDigest = prefix + digest.Encoded()
}
return contentDigest, nil
}
 // Copy copies data into the working tree. The "Download" field is how
 // imagebuilder tells us the instruction was "ADD" and not "COPY".
 func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
 	s.builder.ContentDigester.Restart()
 	for _, copy := range copies {
-		// Check the file and see if part of it is a symlink.
-		// Convert it to the target if so. To be ultrasafe
-		// do the same for the mountpoint.
-		hadFinalPathSeparator := len(copy.Dest) > 0 && copy.Dest[len(copy.Dest)-1] == os.PathSeparator
-		secureMountPoint, err := securejoin.SecureJoin("", s.mountPoint)
-		if err != nil {
-			return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
-		}
-		finalPath, err := securejoin.SecureJoin(secureMountPoint, copy.Dest)
-		if err != nil {
-			return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
-		}
-		if !strings.HasPrefix(finalPath, secureMountPoint) {
-			return errors.Wrapf(err, "error resolving copy destination %s", copy.Dest)
-		}
-		copy.Dest = strings.TrimPrefix(finalPath, secureMountPoint)
-		if len(copy.Dest) == 0 || copy.Dest[len(copy.Dest)-1] != os.PathSeparator {
-			if hadFinalPathSeparator {
-				copy.Dest += string(os.PathSeparator)
-			}
-		}
 		if copy.Download {
 			logrus.Debugf("ADD %#v, %#v", excludes, copy)
 		} else {
@@ -432,12 +275,20 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 		// all-content-comes-from-below-this-directory value.
 		var idMappingOptions *buildah.IDMappingOptions
 		var copyExcludes []string
+		stripSetuid := false
+		stripSetgid := false
 		contextDir := s.executor.contextDir
 		if len(copy.From) > 0 {
-			if isStage, err := s.executor.waitForStage(s.ctx, copy.From, s.stages[:s.index]); isStage && err != nil {
+			// If from has an argument within it, resolve it to its
+			// value. Otherwise just return the value found.
+			from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
+			if fromErr != nil {
+				return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From)
+			}
+			if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
 				return err
 			}
-			if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
+			if other, ok := s.executor.stages[from]; ok && other.index < s.index {
 				contextDir = other.mountPoint
 				idMappingOptions = &other.builder.IDMappingOptions
 			} else if builder, ok := s.executor.containerMap[copy.From]; ok {
@@ -449,6 +300,8 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 			copyExcludes = excludes
 		} else {
 			copyExcludes = append(s.executor.excludes, excludes...)
+			stripSetuid = true // did this change between 18.06 and 19.03?
+			stripSetgid = true // did this change between 18.06 and 19.03?
 		}
 		for _, src := range copy.Src {
 			if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
@@ -460,43 +313,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 				return errors.Errorf("source can't be a URL for COPY")
 			}
 		} else {
-			// Treat the source, which is not a URL, as a
-			// location relative to the
-			// all-content-comes-from-below-this-directory
-			// directory. Also raise an error if the src
-			// escapes the context directory.
-			srcSecure, err := securejoin.SecureJoin(contextDir, src)
-			if err == nil && strings.HasPrefix(src, "../") {
-				err = errors.New("escaping context directory error")
-			}
-			if err != nil {
-				return errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
-			}
-			if hadFinalPathSeparator {
-				// If destination is a folder, we need to take extra care to
-				// ensure that files are copied with correct names (since
-				// resolving a symlink may result in a different name).
-				_, srcName := filepath.Split(src)
-				_, srcNameSecure := filepath.Split(srcSecure)
-				if srcName != srcNameSecure {
-					options := buildah.AddAndCopyOptions{
-						Chown:            copy.Chown,
-						ContextDir:       contextDir,
-						Excludes:         copyExcludes,
-						IDMappingOptions: idMappingOptions,
-					}
-					// If we've a tar file, it will create a directory using the name of the tar
-					// file if we don't blank it out.
-					if copy.Download && (strings.HasSuffix(srcName, ".tar") || strings.HasSuffix(srcName, ".gz")) {
-						srcName = ""
-					}
-					if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil {
-						return err
-					}
-					continue
-				}
-			}
-			sources = append(sources, srcSecure)
+			sources = append(sources, filepath.Join(contextDir, src))
 			}
 		}
 		options := buildah.AddAndCopyOptions{
@@ -504,9 +321,11 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 			ContextDir:       contextDir,
 			Excludes:         copyExcludes,
 			IDMappingOptions: idMappingOptions,
+			StripSetuidBit:   stripSetuid,
+			StripSetgidBit:   stripSetgid,
 		}
 		if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
-			return err
+			return errors.Wrapf(err, "error adding sources %v", sources)
 		}
 	}
 	return nil
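
The new StripSetuidBit/StripSetgidBit options presumably amount to clearing the corresponding mode bits on items as they stream through the copier; a hypothetical helper showing just the bit arithmetic, not buildah's implementation:

package main

import "archive/tar"

// stripModes clears setuid/setgid/sticky bits on a tar header, the kind
// of transformation the Strip*Bit options imply (illustrative only).
func stripModes(hdr *tar.Header, setuid, setgid, sticky bool) {
	if setuid {
		hdr.Mode &^= 04000
	}
	if setgid {
		hdr.Mode &^= 02000
	}
	if sticky {
		hdr.Mode &^= 01000
	}
}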
@@ -824,7 +643,6 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			imgID = imgID[0:11]
 		}
 		if s.executor.iidfile == "" {
-
 			fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
 		}
 	}
@@ -871,11 +689,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			s.executor.log("%s", step.Original)
 		}

-		// Check if there's a --from if the step command is COPY or
-		// ADD. Set copyFrom to point to either the context directory
-		// or the root of the container from the specified stage.
+		// Check if there's a --from if the step command is COPY.
 		// Also check the chown flag for validity.
-		s.copyFrom = s.executor.contextDir
 		for _, flag := range step.Flags {
 			command := strings.ToUpper(step.Command)
 			// chown and from flags should have an '=' sign, '--chown=' or '--from='
@@ -886,31 +701,27 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				return "", nil, errors.Errorf("ADD only supports the --chown=<uid:gid> flag")
 			}
 			if strings.Contains(flag, "--from") && command == "COPY" {
-				var mountPoint string
 				arr := strings.Split(flag, "=")
 				if len(arr) != 2 {
 					return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
 				}
+				// If arr[1] has an argument within it, resolve it to its
+				// value. Otherwise just return the value found.
+				from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
+				if fromErr != nil {
+					return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1])
+				}
 				// If the source's name corresponds to the
 				// result of an earlier stage, wait for that
 				// stage to finish being built.
-				// If arr[1] has an argument within it, resolve it to its
-				// value. Otherwise just return the value found.
-				var arr1Err error
-				arr[1], arr1Err = imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
-				if arr1Err != nil {
-					return "", nil, errors.Wrapf(arr1Err, "unable to resolve argument %q", arr[1])
-				}
-				if isStage, err := s.executor.waitForStage(ctx, arr[1], s.stages[:s.index]); isStage && err != nil {
+				if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
 					return "", nil, err
 				}
-				if otherStage, ok := s.executor.stages[arr[1]]; ok && otherStage.index < s.index {
-					mountPoint = otherStage.mountPoint
-				} else if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
-					return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
+				if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
+					break
+				} else if _, err = s.getImageRootfs(ctx, from); err != nil {
+					return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
 				}
-				s.copyFrom = mountPoint
 				break
 			}
 		}
@@ -933,9 +744,14 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
 			}
 			// In case we added content, retrieve its digest.
-			addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
-			if err != nil {
-				return "", nil, err
+			addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
+			addedContentSummary := addedContentType
+			if addedContentDigest != "" {
+				if addedContentSummary != "" {
+					addedContentSummary = addedContentSummary + ":"
+				}
+				addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
+				logrus.Debugf("added content %s", addedContentSummary)
 			}
 			if moreInstructions {
 				// There are still more instructions to process
@@ -943,16 +759,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 				// instruction in the history that we'll write
 				// for the image when we eventually commit it.
 				now := time.Now()
-				s.builder.AddPrependedEmptyLayer(&now, s.getCreatedBy(node, addedContentDigest), "", "")
+				s.builder.AddPrependedEmptyLayer(&now, s.getCreatedBy(node, addedContentSummary), "", "")
 				continue
 			} else {
 				// This is the last instruction for this stage,
 				// so we should commit this container to create
-				// an image, but only if it's the last one, or
-				// if it's used as the basis for a later stage.
+				// an image, but only if it's the last stage,
+				// or if it's used as the basis for a later
+				// stage.
 				if lastStage || imageIsUsedLater {
 					logCommit(s.output, i)
-					imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentDigest), false, s.output)
+					imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output)
 					if err != nil {
 						return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
 					}
@@ -970,6 +787,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			cacheID string
 			err     error
 			rebase  bool
+			addedContentSummary string
 		)
// If we have to commit for this instruction, only assign the // If we have to commit for this instruction, only assign the
@@ -978,46 +796,47 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			commitName = s.output
 		}

-		// If we're using the cache, and we've managed to stick with
-		// cached images so far, look for one that matches what we
-		// expect to produce for this instruction.
-		if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) {
-			addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
-			if err != nil {
-				return "", nil, err
-			}
-			cacheID, err = s.intermediateImageExists(ctx, node, addedContentDigest)
+		// Check if there's already an image based on our parent that
+		// has the same change that we're about to make, so far as we
+		// can tell.
+		if checkForLayers {
+			cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary)
 			if err != nil {
 				return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
 			}
-			if cacheID != "" {
-				// Note the cache hit.
-				logCacheHit(cacheID)
-			} else {
-				// We're not going to find any more cache hits.
-				checkForLayers = false
-			}
 		}
-		if cacheID != "" {
-			// A suitable cached image was found, so just reuse it.
-			// If we need to name the resulting image because it's
-			// the last step in this stage, add the name to the
-			// image.
-			imgID = cacheID
-			if commitName != "" {
-				logCommit(commitName, i)
-				if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
-					return "", nil, err
-				}
-				logImageID(imgID)
-			}
-			// Update our working container to be based off of the
-			// cached image, if we might need to use it as a basis
-			// for the next instruction, or if we need the root
-			// filesystem to match the image contents for the sake
-			// of a later stage that wants to copy content from it.
-			rebase = moreInstructions || rootfsIsUsedLater
+
+		// If we didn't find a cache entry, or we need to add content
+		// to find the digest of the content to check for a cached
+		// image, run the step so that we can check if the result
+		// matches a cache.
+		if cacheID == "" {
+			// Process the instruction directly.
+			if err = ib.Run(step, s, noRunsRemaining); err != nil {
+				logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
+				return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
+			}
+			// In case we added content, retrieve its digest.
+			addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
+			addedContentSummary = addedContentType
+			if addedContentDigest != "" {
+				if addedContentSummary != "" {
+					addedContentSummary = addedContentSummary + ":"
+				}
+				addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
+				logrus.Debugf("added content %s", addedContentSummary)
+			}
+			// Check if there's already an image based on our parent that
+			// has the same change that we just made.
+			if checkForLayers {
+				cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary)
+				if err != nil {
+					return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
+				}
+			}
 		} else {
 			// If the instruction would affect our configuration,
 			// process the configuration change so that, if we fall
 			// off the cache path, the filesystem changes from the
@ -1031,34 +850,41 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
} }
} }
} else {
// If we didn't find a cached image that we could just reuse,
// process the instruction directly.
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
} }
// In case we added content, retrieve its digest.
addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env) if cacheID != "" && !(s.executor.squash && lastInstruction) {
if err != nil { logCacheHit(cacheID)
// A suitable cached image was found, so we can just
// reuse it. If we need to add a name to the resulting
// image because it's the last step in this stage, add
// the name to the image.
imgID = cacheID
if commitName != "" {
logCommit(commitName, i)
if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
return "", nil, err return "", nil, err
} }
// Create a new image, maybe with a new layer. }
} else {
// We're not going to find any more cache hits, so we
// can stop looking for them.
checkForLayers = false
// Create a new image, maybe with a new layer, with the
// name for this stage if it's the last instruction.
logCommit(s.output, i) logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentDigest), !s.stepRequiresLayer(step), commitName) imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
if err != nil { if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
} }
logImageID(imgID)
// We only need to build a new container rootfs
// using this image if we plan on making
// further changes to it. Subsequent stages
// that just want to use the rootfs as a source
// for COPY or ADD will be content with what we
// already have.
rebase = moreInstructions
} }
logImageID(imgID)
// Update our working container to be based off of the cached
// image, if we might need to use it as a basis for the next
// instruction, or if we need the root filesystem to match the
// image contents for the sake of a later stage that wants to
// copy content from it.
rebase = moreInstructions || rootfsIsUsedLater
if rebase { if rebase {
// Since we either committed the working container or // Since we either committed the working container or
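The reordered flow above is the core of the commit: content is now copied into the new layer and digested in the same pass, and only then is the digest used as a cache key (commit on a miss, reuse on a hit). A minimal, stdlib-only sketch of that copy-while-digesting pattern; the `contentType` and `summary` names are illustrative stand-ins, not buildah API:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("file contents being ADDed\n")
	var layer bytes.Buffer // stands in for the new layer being populated

	// Copy and digest in a single pass, as the executor now does,
	// instead of digesting first and copying afterwards.
	hasher := sha256.New()
	if _, err := io.Copy(&layer, io.TeeReader(src, hasher)); err != nil {
		panic(err)
	}

	// Assemble a "<type>:<digest>" summary the same general way the
	// new code builds addedContentSummary.
	contentType := "file"
	summary := contentType
	if summary != "" {
		summary += ":"
	}
	summary += fmt.Sprintf("sha256:%x", hasher.Sum(nil))
	fmt.Println("cache lookup key component:", summary)
}
```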
@ -1109,7 +935,7 @@ func historyEntriesEqual(base, derived v1.History) bool {
// base image (if we have one), plus the current instruction.
// Used to verify whether a cache of the intermediate image exists and whether
// to run the build again.
-func (s *StageExecutor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History, addedContentDigest string) bool {
+func (s *StageExecutor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History, addedContentSummary string) bool {
	if len(baseHistory) >= len(history) {
		return false
	}
@ -1121,13 +947,13 @@ func (s *StageExecutor) historyMatches(baseHistory []v1.History, child *parser.N
			return false
		}
	}
-	return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentDigest)
+	return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentSummary)
}

// getCreatedBy returns the command the image at node will be created by. If
// the passed-in CompositeDigester is not nil, it is assumed to have the digest
// information for the content if the node is ADD or COPY.
-func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentDigest string) string {
+func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) string {
	if node == nil {
		return "/bin/sh"
	}
@ -1143,7 +969,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentDigest strin
		for destination.Next != nil {
			destination = destination.Next
		}
-		return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentDigest + " in " + destination.Value + " "
+		return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " "
	default:
		return "/bin/sh -c #(nop) " + node.Original
	}
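The summary matters for caching because it is baked into the created-by string recorded in image history, which is exactly what historyMatches compares: two builds can only share a cached layer when the copied content digested to the same value. An illustrative sketch, with invented digest values and a simplified stand-in for the function above:

```go
package main

import (
	"fmt"
	"strings"
)

// createdBy mirrors the shape of the history string built above for
// ADD/COPY nodes; it is a simplified stand-in, not the buildah function.
func createdBy(command, addedContentSummary, destination string) string {
	return "/bin/sh -c #(nop) " + strings.ToUpper(command) + " " + addedContentSummary + " in " + destination + " "
}

func main() {
	a := createdBy("add", "file:sha256:9f86d081884c7d65", "/usr/local/bin/")
	b := createdBy("add", "file:sha256:1b4f0e9851971998", "/usr/local/bin/")
	fmt.Println(a)
	fmt.Println(a == b) // false: different content means no cache hit
}
```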
@ -1373,29 +1199,5 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}

func (s *StageExecutor) EnsureContainerPath(path string) error {
-	targetPath, err := securejoin.SecureJoin(s.mountPoint, path)
-	if err != nil {
-		return errors.Wrapf(err, "error ensuring container path %q", path)
-	}
-	_, err = os.Stat(targetPath)
-	if err != nil && os.IsNotExist(err) {
-		err = os.MkdirAll(targetPath, 0755)
-		if err != nil {
-			return errors.Wrapf(err, "error creating directory path %q", targetPath)
-		}
-		// get the uid and gid so that we can set the correct permissions on the
-		// working directory
-		uid, gid, _, err := chrootuser.GetUser(s.mountPoint, s.builder.User())
-		if err != nil {
-			return errors.Wrapf(err, "error getting uid and gid for user %q", s.builder.User())
-		}
-		if err = os.Chown(targetPath, int(uid), int(gid)); err != nil {
-			return errors.Wrapf(err, "error setting ownership on %q", targetPath)
-		}
-	}
-	if err != nil {
-		return errors.Wrapf(err, "error ensuring container path %q", path)
-	}
-	return nil
+	return copier.Mkdir(s.mountPoint, path, copier.MkdirOptions{})
}
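For contrast with the one-line replacement, here is a stdlib-only sketch of roughly what the deleted body had to do by hand. The helper name is invented, and the lexical clamping shown is deliberately naive: it cannot stop a symlink inside the rootfs from pointing outside it, which is the hole the chroot-based copier package closes:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// naiveEnsure creates root/path, clamping ".." components lexically.
// Unlike the copier package it does not resolve symlinks relative to
// root, so it is illustrative only.
func naiveEnsure(root, path string) (string, error) {
	clamped := filepath.Join(root, filepath.Clean("/"+path))
	if err := os.MkdirAll(clamped, 0o755); err != nil {
		return "", err
	}
	return clamped, nil
}

func main() {
	root, err := os.MkdirTemp("", "rootfs")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	dir, err := naiveEnsure(root, "../../etc/../workdir")
	fmt.Println(dir, err) // stays under root lexically; symlinks could still escape
}
```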


@ -316,7 +316,7 @@ func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator)
	return nil
}

-func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
+func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
	var mounts []specs.Mount
	hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
	// Add temporary copies of the contents of volume locations at the
@ -359,7 +359,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWit
			if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil {
				return nil, errors.Wrapf(err, "error chowning directory %q for volume %q", volumePath, volume)
			}
-			if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
+			if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
				return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
			}
		}
@ -483,8 +483,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
	// Add temporary copies of the contents of volume locations at the
	// volume locations, unless we already have something there.
-	copyWithTar := b.copyWithTar(nil, nil, nil, false)
-	builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
+	builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID))
	if err != nil {
		return err
	}


@ -7,6 +7,10 @@ import (
	selinux "github.com/opencontainers/selinux/go-selinux"
)

+func selinuxGetEnabled() bool {
+	return selinux.GetEnabled()
+}
+
func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
	if processLabel != "" && selinux.GetEnabled() {
		g.SetProcessSelinuxLabel(processLabel)


@ -6,5 +6,9 @@ import (
	"github.com/opencontainers/runtime-tools/generate"
)

+func selinuxGetEnabled() bool {
+	return false
+}
+
func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
}


@ -29,9 +29,10 @@ load helpers
	run_buildah add $cid ${TESTDIR}/randomfile /subdir
	# Copy two files to a specific subdirectory
	run_buildah add $cid ${TESTDIR}/randomfile ${TESTDIR}/other-randomfile /other-subdir
+	# Copy two files to a specific location, which succeeds because we can create it as a directory.
+	run_buildah add $cid ${TESTDIR}/randomfile ${TESTDIR}/other-randomfile /notthereyet-subdir
	# Copy two files to a specific location, which fails because it's not a directory.
-	run_buildah 125 add $cid ${TESTDIR}/randomfile ${TESTDIR}/other-randomfile /notthereyet-subdir
-	run_buildah 125 add $cid ${TESTDIR}/randomfile ${TESTDIR}/other-randomfile /randomfile
+	run_buildah 125 add $cid ${TESTDIR}/randomfile ${TESTDIR}/other-randomfile /randomfile
	# Copy a file to a different working directory
	run_buildah config --workingdir=/cwd $cid
	run_buildah add $cid ${TESTDIR}/randomfile


@ -1738,12 +1738,20 @@ _EOF
	run stat -c "%d:%i" ${root}/subdir/test1.txt
	id1=$output
+	run stat -c "%h" ${root}/subdir/test1.txt
+	expect_output 4
	run stat -c "%d:%i" ${root}/subdir/test2.txt
	expect_output $id1 "stat(test2) == stat(test1)"
+	run stat -c "%h" ${root}/subdir/test2.txt
+	expect_output 4
	run stat -c "%d:%i" ${root}/test3.txt
	expect_output $id1 "stat(test3) == stat(test1)"
+	run stat -c "%h" ${root}/test3.txt
+	expect_output 4
	run stat -c "%d:%i" ${root}/test4.txt
	expect_output $id1 "stat(test4) == stat(test1)"
+	run stat -c "%h" ${root}/test4.txt
+	expect_output 4
}

@test "bud without any arguments should succeed" {


@ -709,10 +709,10 @@ type FSHeader struct {
	Typeflag byte      `json:"typeflag,omitempty"`
	Name     string    `json:"name,omitempty"`
	Linkname string    `json:"linkname,omitempty"`
-	Size     int64     `json:"size,omitempty"`
+	Size     int64     `json:"size"`
	Mode     int64     `json:"mode,omitempty"`
-	UID      int       `json:"uid,omitempty"`
-	GID      int       `json:"gid,omitempty"`
+	UID      int       `json:"uid"`
+	GID      int       `json:"gid"`
	ModTime  time.Time `json:"mtime,omitempty"`
	Devmajor int64     `json:"devmajor,omitempty"`
	Devminor int64     `json:"devminor,omitempty"`
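Dropping `omitempty` from Size, UID, and GID is a correctness fix, not a style change: with `omitempty`, a zero value is elided from the JSON entirely, so a zero-length file or a root-owned (uid 0, gid 0) entry becomes indistinguishable from "field never set" when headers cross the pipe between the copier's two halves. A minimal demonstration with a stand-in struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type withOmitempty struct {
	UID int `json:"uid,omitempty"`
}

type withoutOmitempty struct {
	UID int `json:"uid"`
}

func main() {
	a, _ := json.Marshal(withOmitempty{UID: 0})
	b, _ := json.Marshal(withoutOmitempty{UID: 0})
	fmt.Println(string(a)) // {} — root ownership silently disappears
	fmt.Println(string(b)) // {"uid":0} — the zero value survives the round trip
}
```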


@ -23,8 +23,14 @@ load helpers
	run_buildah mount $cid
	root=$output
	run_buildah config --workingdir / $cid
+	# copy ${TESTDIR}/randomfile to a file of the same name in the container's working directory
	run_buildah copy $cid ${TESTDIR}/randomfile
-	run_buildah 125 copy $cid ${TESTDIR}/other-randomfile ${TESTDIR}/third-randomfile ${TESTDIR}/randomfile
+	# copy ${TESTDIR}/other-randomfile and ${TESTDIR}/third-randomfile to a new directory named ${TESTDIR}/randomfile in the container
+	run_buildah copy $cid ${TESTDIR}/other-randomfile ${TESTDIR}/third-randomfile ${TESTDIR}/randomfile
+	# try to copy ${TESTDIR}/other-randomfile and ${TESTDIR}/third-randomfile to /randomfile, which already exists and is a file
+	run_buildah 125 copy $cid ${TESTDIR}/other-randomfile ${TESTDIR}/third-randomfile /randomfile
+	# copy ${TESTDIR}/other-randomfile and ${TESTDIR}/third-randomfile to the previously-created directory named ${TESTDIR}/randomfile in the container
+	run_buildah copy $cid ${TESTDIR}/other-randomfile ${TESTDIR}/third-randomfile ${TESTDIR}/randomfile
	run_buildah rm $cid

	_prefetch alpine

287
util.go

@ -1,26 +1,20 @@
package buildah

import (
-	"archive/tar"
	"io"
-	"io/ioutil"
	"os"
	"path/filepath"
+	"sync"

-	"github.com/containers/buildah/util"
+	"github.com/containers/buildah/copier"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage"
-	"github.com/containers/storage/pkg/archive"
-	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/idtools"
-	"github.com/containers/storage/pkg/pools"
	"github.com/containers/storage/pkg/reexec"
-	"github.com/containers/storage/pkg/system"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
	rspec "github.com/opencontainers/runtime-spec/specs-go"
-	selinux "github.com/opencontainers/selinux/go-selinux"
	"github.com/opencontainers/selinux/go-selinux/label"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
@ -109,245 +103,6 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
	return uidmap, gidmap
}
// copyFileWithTar returns a function which copies a single file from outside
// of any container, or another container, into our working container, mapping
// read permissions using the passed-in ID maps, writing using the container's
// ID mappings, possibly overridden using the passed-in chownOpts
func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
if tarIDMappingOptions == nil {
tarIDMappingOptions = &IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
}
var hardlinkChecker util.HardlinkChecker
return func(src, dest string) error {
var f *os.File
logrus.Debugf("copyFileWithTar(%s, %s)", src, dest)
fi, err := os.Lstat(src)
if err != nil {
return errors.Wrapf(err, "error reading attributes of %q", src)
}
sysfi, err := system.Lstat(src)
if err != nil {
return errors.Wrapf(err, "error reading attributes of %q", src)
}
hostUID := sysfi.UID()
hostGID := sysfi.GID()
containerUID, containerGID, err := util.GetContainerIDs(tarIDMappingOptions.UIDMap, tarIDMappingOptions.GIDMap, hostUID, hostGID)
if err != nil {
return errors.Wrapf(err, "error mapping owner IDs of %q: %d/%d", src, hostUID, hostGID)
}
hdr, err := tar.FileInfoHeader(fi, filepath.Base(src))
if err != nil {
return errors.Wrapf(err, "error generating tar header for: %q", src)
}
chrootedDest, err := filepath.Rel(b.MountPoint, dest)
if err != nil {
return errors.Wrapf(err, "error generating relative-to-chroot target name for %q", dest)
}
hdr.Name = chrootedDest
hdr.Uid = int(containerUID)
hdr.Gid = int(containerGID)
if fi.Mode().IsRegular() && hdr.Typeflag == tar.TypeReg {
if linkname := hardlinkChecker.Check(fi); linkname != "" {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = linkname
} else {
hardlinkChecker.Add(fi, chrootedDest)
f, err = os.Open(src)
if err != nil {
return errors.Wrapf(err, "error opening %q to copy its contents", src)
}
}
}
if fi.Mode()&os.ModeSymlink == os.ModeSymlink && hdr.Typeflag == tar.TypeSymlink {
hdr.Typeflag = tar.TypeSymlink
linkName, err := os.Readlink(src)
if err != nil {
return errors.Wrapf(err, "error reading destination from symlink %q", src)
}
hdr.Linkname = linkName
}
pipeReader, pipeWriter := io.Pipe()
writer := tar.NewWriter(pipeWriter)
var copyErr error
go func(srcFile *os.File) {
err := writer.WriteHeader(hdr)
if err != nil {
logrus.Debugf("error writing header for %s: %v", srcFile.Name(), err)
copyErr = err
}
if srcFile != nil {
n, err := pools.Copy(writer, srcFile)
if n != hdr.Size {
logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n)
}
if err != nil {
logrus.Debugf("error copying contents of %s: %v", fi.Name(), err)
copyErr = err
}
if err = srcFile.Close(); err != nil {
logrus.Debugf("error closing %s: %v", fi.Name(), err)
}
}
if err = writer.Close(); err != nil {
logrus.Debugf("error closing write pipe for %s: %v", hdr.Name, err)
}
pipeWriter.Close()
pipeWriter = nil
}(f)
untar := b.untar(chownOpts, hasher, dryRun)
err = untar(pipeReader, b.MountPoint)
if err == nil {
err = copyErr
}
if pipeWriter != nil {
pipeWriter.Close()
}
return err
}
}
// copyWithTar returns a function which copies a directory tree from outside of
// our container or from another container, into our working container, mapping
// permissions at read-time using the container's ID maps, with ownership at
// write-time possibly overridden using the passed-in chownOpts
func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
tar := b.tarPath(tarIDMappingOptions)
return func(src, dest string) error {
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
untar := b.untar(chownOpts, thisHasher, dryRun)
rc, err := tar(src)
if err != nil {
return errors.Wrapf(err, "error archiving %q for copy", src)
}
return untar(rc, dest)
}
}
// untarPath returns a function which extracts an archive in a specified
// location into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
if dryRun {
return func(src, dest string) error {
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
f, err := os.Open(src)
if err != nil {
return errors.Wrapf(err, "error opening %q", src)
}
defer f.Close()
_, err = io.Copy(thisHasher, f)
return err
}
}
return func(src, dest string) error {
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
untarPathAndChown := chrootarchive.UntarPathAndChown(chownOpts, thisHasher, convertedUIDMap, convertedGIDMap)
return untarPathAndChown(src, dest)
}
}
// tarPath returns a function which creates an archive of a specified location,
// which is often somewhere in the container's filesystem, mapping permissions
// using the container's ID maps, or the passed-in maps if specified
func (b *Builder) tarPath(idMappingOptions *IDMappingOptions) func(path string) (io.ReadCloser, error) {
var uidmap, gidmap []idtools.IDMap
if idMappingOptions == nil {
idMappingOptions = &IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
}
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(idMappingOptions.UIDMap, idMappingOptions.GIDMap)
tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
uidmap = tarMappings.UIDs()
gidmap = tarMappings.GIDs()
options := &archive.TarOptions{
Compression: archive.Uncompressed,
UIDMaps: uidmap,
GIDMaps: gidmap,
}
return func(path string) (io.ReadCloser, error) {
return archive.TarWithOptions(path, options)
}
}
// untar returns a function which extracts an archive stream to a specified
// location in the container's filesystem, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(tarArchive io.ReadCloser, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
options := &archive.TarOptions{
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: chownOpts,
}
untar := chrootarchive.Untar
if dryRun {
untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
if _, err := io.Copy(ioutil.Discard, tarArchive); err != nil {
return errors.Wrapf(err, "error digesting tar stream")
}
return nil
}
}
originalUntar := untar
untarWithHasher := func(tarArchive io.Reader, dest string, options *archive.TarOptions, untarHasher io.Writer) error {
reader := tarArchive
if untarHasher != nil {
reader = io.TeeReader(tarArchive, untarHasher)
}
return originalUntar(reader, dest, options)
}
return func(tarArchive io.ReadCloser, dest string) error {
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
err := untarWithHasher(tarArchive, dest, options, thisHasher)
if err2 := tarArchive.Close(); err2 != nil {
if err == nil {
err = err2
}
}
return err
}
}
// isRegistryBlocked checks if the named registry is marked as blocked
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
	reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
@ -389,10 +144,10 @@ func isReferenceBlocked(ref types.ImageReference, sc *types.SystemContext) (bool
	return false, nil
}

-// ReserveSELinuxLabels reads containers storage and reserves SELinux containers
-// fall all existing buildah containers
+// ReserveSELinuxLabels reads containers storage and reserves SELinux contexts
+// which are already being used by buildah containers.
func ReserveSELinuxLabels(store storage.Store, id string) error {
-	if selinux.GetEnabled() {
+	if selinuxGetEnabled() {
		containers, err := store.Containers()
		if err != nil {
			return errors.Wrapf(err, "error getting list of containers")
@ -438,3 +193,35 @@ func IsContainer(id string, store storage.Store) (bool, error) {
	}
	return true, nil
}
// Copy content from the directory "src" to the directory "dest", ensuring that
// content from outside of "root" (which is a parent of "src" or "src" itself)
// isn't read.
func extractWithTar(root, src, dest string) error {
var getErr, putErr error
var wg sync.WaitGroup
pipeReader, pipeWriter := io.Pipe()
wg.Add(1)
go func() {
getErr = copier.Get(root, src, copier.GetOptions{}, []string{"."}, pipeWriter)
pipeWriter.Close()
wg.Done()
}()
wg.Add(1)
go func() {
putErr = copier.Put(dest, dest, copier.PutOptions{}, pipeReader)
pipeReader.Close()
wg.Done()
}()
wg.Wait()
if getErr != nil {
return errors.Wrapf(getErr, "error reading %q", src)
}
if putErr != nil {
return errors.Wrapf(putErr, "error copying contents of %q to %q", src, dest)
}
return nil
}
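The same producer/consumer shape as extractWithTar, reduced to a self-contained sketch: one goroutine writes a tar stream into an io.Pipe while another consumes it, and the WaitGroup lets the caller collect both goroutines' errors afterwards. Plain archive/tar stands in here for copier.Get and copier.Put:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"sync"
)

func main() {
	pipeReader, pipeWriter := io.Pipe()
	var getErr, putErr error
	var wg sync.WaitGroup

	wg.Add(1)
	go func() { // producer: plays the copier.Get role
		defer wg.Done()
		tw := tar.NewWriter(pipeWriter)
		body := []byte("hello\n")
		getErr = tw.WriteHeader(&tar.Header{Typeflag: tar.TypeReg, Name: "hello.txt", Mode: 0o644, Size: int64(len(body))})
		if getErr == nil {
			_, getErr = tw.Write(body)
		}
		if err := tw.Close(); getErr == nil {
			getErr = err
		}
		pipeWriter.Close()
	}()

	wg.Add(1)
	go func() { // consumer: plays the copier.Put role
		defer wg.Done()
		tr := tar.NewReader(pipeReader)
		for {
			hdr, err := tr.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				putErr = err
				break
			}
			fmt.Println("extracted:", hdr.Name)
			if _, err := io.Copy(io.Discard, tr); err != nil {
				putErr = err
				break
			}
		}
		pipeReader.Close()
	}()

	wg.Wait()
	fmt.Println("get:", getErr, "put:", putErr)
}
```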


@ -1,19 +0,0 @@
# Copyright (C) 2017 SUSE LLC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
language: go
go:
- 1.7.x
- 1.8.x
- tip
os:
- linux
- osx
script:
- go test -cover -v ./...
notifications:
email: false


@ -1,28 +0,0 @@
Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
Copyright (C) 2017 SUSE LLC. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,65 +0,0 @@
## `filepath-securejoin` ##
[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin)
An implementation of `SecureJoin`, a [candidate for inclusion in the Go
standard library][go#20126]. The purpose of this function is to be a "secure"
alternative to `filepath.Join`, and in particular it provides certain
guarantees that are not provided by `filepath.Join`.
This is the function prototype:
```go
func SecureJoin(root, unsafePath string) (string, error)
```
This library **guarantees** the following:
* If no error is set, the resulting string **must** be a child path of
`root` and will not contain any symlink path components (they will all
be expanded).
* When expanding symlinks, all symlink path components **must** be resolved
relative to the provided root. In particular, this can be considered a
userspace implementation of how `chroot(2)` operates on file paths. Note that
these symlinks will **not** be expanded lexically (`filepath.Clean` is not
called on the input before processing).
* Non-existent path components are unaffected by `SecureJoin` (similar to
`filepath.EvalSymlinks`'s semantics).
* The returned path will always be `filepath.Clean`ed and thus not contain any
`..` components.
A (trivial) implementation of this function on GNU/Linux systems could be done
with the following (note that this requires root privileges and is far more
opaque than the implementation in this library, and also requires that
`readlink` is inside the `root` path):
```go
package securejoin
import (
"os/exec"
"path/filepath"
)
func SecureJoin(root, unsafePath string) (string, error) {
unsafePath = string(filepath.Separator) + unsafePath
cmd := exec.Command("chroot", root,
"readlink", "--canonicalize-missing", "--no-newline", unsafePath)
output, err := cmd.CombinedOutput()
if err != nil {
return "", err
}
expanded := string(output)
return filepath.Join(root, expanded), nil
}
```
[go#20126]: https://github.com/golang/go/issues/20126
### License ###
The license of this project is the same as Go, which is a BSD 3-clause license
available in the `LICENSE` file.


@ -1 +0,0 @@
0.2.2


@ -1,134 +0,0 @@
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
// Copyright (C) 2017 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package securejoin is an implementation of the hopefully-soon-to-be-included
// SecureJoin helper that is meant to be part of the "path/filepath" package.
// The purpose of this project is to provide a PoC implementation to make the
// SecureJoin proposal (https://github.com/golang/go/issues/20126) more
// tangible.
package securejoin
import (
"bytes"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/pkg/errors"
)
// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been
// evaluated in attempting to securely join the two given paths.
var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join")
// IsNotExist tells you if err is an error that implies that either the path
// accessed does not exist (or path components don't exist). This is
// effectively a more broad version of os.IsNotExist.
func IsNotExist(err error) bool {
// If it's a bone-fide ENOENT just bail.
if os.IsNotExist(errors.Cause(err)) {
return true
}
// Check that it's not actually an ENOTDIR, which in some cases is a more
// convoluted case of ENOENT (usually involving weird paths).
var errno error
switch err := errors.Cause(err).(type) {
case *os.PathError:
errno = err.Err
case *os.LinkError:
errno = err.Err
case *os.SyscallError:
errno = err.Err
}
return errno == syscall.ENOTDIR || errno == syscall.ENOENT
}
// SecureJoinVFS joins the two given path components (similar to Join) except
// that the returned path is guaranteed to be scoped inside the provided root
// path (when evaluated). Any symbolic links in the path are evaluated with the
// given root treated as the root of the filesystem, similar to a chroot. The
// filesystem state is evaluated through the given VFS interface (if nil, the
// standard os.* family of functions are used).
//
// Note that the guarantees provided by this function only apply if the path
// components in the returned string are not modified (in other words are not
// replaced with symlinks on the filesystem) after this function has returned.
// Such a symlink race is necessarily out-of-scope of SecureJoin.
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
// Use the os.* VFS implementation if none was specified.
if vfs == nil {
vfs = osVFS{}
}
var path bytes.Buffer
n := 0
for unsafePath != "" {
if n > 255 {
return "", ErrSymlinkLoop
}
// Next path component, p.
i := strings.IndexRune(unsafePath, filepath.Separator)
var p string
if i == -1 {
p, unsafePath = unsafePath, ""
} else {
p, unsafePath = unsafePath[:i], unsafePath[i+1:]
}
// Create a cleaned path, using the lexical semantics of /../a, to
// create a "scoped" path component which can safely be joined to fullP
// for evaluation. At this point, path.String() doesn't contain any
// symlink components.
cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p)
if cleanP == string(filepath.Separator) {
path.Reset()
continue
}
fullP := filepath.Clean(root + cleanP)
// Figure out whether the path is a symlink.
fi, err := vfs.Lstat(fullP)
if err != nil && !IsNotExist(err) {
return "", err
}
// Treat non-existent path components the same as non-symlinks (we
// can't do any better here).
if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 {
path.WriteString(p)
path.WriteRune(filepath.Separator)
continue
}
// Only increment when we actually dereference a link.
n++
// It's a symlink, expand it by prepending it to the yet-unparsed path.
dest, err := vfs.Readlink(fullP)
if err != nil {
return "", err
}
// Absolute symlinks reset any work we've already done.
if filepath.IsAbs(dest) {
path.Reset()
}
unsafePath = dest + string(filepath.Separator) + unsafePath
}
// We have to clean path.String() here because it may contain '..'
// components that are entirely lexical, but would be misleading otherwise.
// And finally do a final clean to ensure that root is also lexically
// clean.
fullP := filepath.Clean(string(filepath.Separator) + path.String())
return filepath.Clean(root + fullP), nil
}
// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library
// of functions as the VFS. If in doubt, use this function over SecureJoinVFS.
func SecureJoin(root, unsafePath string) (string, error) {
return SecureJoinVFS(root, unsafePath, nil)
}


@ -1 +0,0 @@
github.com/pkg/errors v0.8.0


@ -1,41 +0,0 @@
// Copyright (C) 2017 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package securejoin
import "os"
// In future this should be moved into a separate package, because now there
// are several projects (umoci and go-mtree) that are using this sort of
// interface.
// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is
// equivalent to using the standard os.* family of functions. This is mainly
// used for the purposes of mock testing, but also can be used to otherwise use
// SecureJoin with VFS-like system.
type VFS interface {
// Lstat returns a FileInfo describing the named file. If the file is a
// symbolic link, the returned FileInfo describes the symbolic link. Lstat
// makes no attempt to follow the link. These semantics are identical to
// os.Lstat.
Lstat(name string) (os.FileInfo, error)
// Readlink returns the destination of the named symbolic link. These
// semantics are identical to os.Readlink.
Readlink(name string) (string, error)
}
// osVFS is the "nil" VFS, in that it just passes everything through to the os
// module.
type osVFS struct{}
// Lstat returns a FileInfo describing the named file. If the file is a
// symbolic link, the returned FileInfo describes the symbolic link. Lstat
// makes no attempt to follow the link. These semantics are identical to
// os.Lstat.
func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
// Readlink returns the destination of the named symbolic link. These
// semantics are identical to os.Readlink.
func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) }

2
vendor/modules.txt vendored

@ -159,8 +159,6 @@ github.com/containers/storage/pkg/truncindex
github.com/containers/storage/pkg/unshare
# github.com/coreos/go-systemd/v22 v22.0.0
github.com/coreos/go-systemd/v22/dbus
-# github.com/cyphar/filepath-securejoin v0.2.2
-github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew
# github.com/docker/distribution v2.7.1+incompatible