Merge branch 'containers:main' into naveen/feat/set-perms-actions

commit af6e9f375c
@@ -15,3 +15,11 @@ updates:
time: "10:00"
timezone: Europe/Berlin
open-pull-requests-limit: 10

- package-ecosystem: github-actions
directory: "/"
schedule:
interval: daily
time: "10:00"
timezone: Europe/Berlin
open-pull-requests-limit: 10

@@ -74,7 +74,7 @@ jobs:
- if: steps.cron.outputs.failures > 0
name: Send failure notification e-mail
# Ref: https://github.com/dawidd6/action-send-mail
uses: dawidd6/action-send-mail@v2.2.2
uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
with:
server_address: ${{secrets.ACTION_MAIL_SERVER}}
server_port: 465

@@ -93,7 +93,7 @@ jobs:

- if: failure()
name: Send error notification e-mail
uses: dawidd6/action-send-mail@v2.2.2
uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
with:
server_address: ${{secrets.ACTION_MAIL_SERVER}}
server_port: 465

@@ -56,16 +56,16 @@ jobs:
uses: actions/checkout@v2

- name: Set up QEMU
uses: docker/setup-qemu-action@v1
uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@94ab11c41e45d028884a99163086648e898eed25 # v1
with:
driver-opts: network=host
install: true

- name: Build and locally push image
uses: docker/build-push-action@v2
uses: docker/build-push-action@ac9327eae2b366085ac7f6a2d02df8aa8ead720a # v2
with:
context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile

@@ -165,7 +165,7 @@ jobs:

# Push to $REPONAME_QUAY_REGISTRY for stable, testing. and upstream
- name: Login to ${{ env.REPONAME_QUAY_REGISTRY }}
uses: docker/login-action@v1
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # v1
if: steps.reponame_reg.outputs.push == 'true'
with:
registry: ${{ env.REPONAME_QUAY_REGISTRY }}

@@ -175,7 +175,7 @@ jobs:
password: ${{ secrets.REPONAME_QUAY_PASSWORD }}

- name: Push images to ${{ steps.reponame_reg.outputs.fqin }}
uses: docker/build-push-action@v2
uses: docker/build-push-action@ac9327eae2b366085ac7f6a2d02df8aa8ead720a # v2
if: steps.reponame_reg.outputs.push == 'true'
with:
cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}

@@ -191,7 +191,7 @@ jobs:
# Push to $CONTAINERS_QUAY_REGISTRY only stable
- name: Login to ${{ env.CONTAINERS_QUAY_REGISTRY }}
if: steps.containers_reg.outputs.push == 'true'
uses: docker/login-action@v1
uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 # v1
with:
registry: ${{ env.CONTAINERS_QUAY_REGISTRY}}
username: ${{ secrets.CONTAINERS_QUAY_USERNAME }}

@@ -199,7 +199,7 @@ jobs:

- name: Push images to ${{ steps.containers_reg.outputs.fqin }}
if: steps.containers_reg.outputs.push == 'true'
uses: docker/build-push-action@v2
uses: docker/build-push-action@ac9327eae2b366085ac7f6a2d02df8aa8ead720a # v2
with:
cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
cache-to: type=inline

@@ -10,12 +10,12 @@ jobs:
steps:
- name: get pr commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@v1.1.0
uses: tim-actions/get-pr-commits@55b867b9b28954e6f5c1a0fe2f729dc926c306d0 # v1.1.0
with:
token: ${{ secrets.GITHUB_TOKEN }}

- name: check subject line length
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
uses: tim-actions/commit-message-checker-with-regex@d6d9770051dd6460679d1cab1dcaa8cffc5c2bbd # v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
pattern: '^.{0,72}(\n.*)*$'

12 CHANGELOG.md
@@ -2,6 +2,18 @@

# Changelog

## v1.25.1 (2022-03-30)

buildah: create WORKDIR with USER permissions
vendor: update github.com/openshift/imagebuilder
copier: attempt to open the dir before adding it
Updated dependabot to get updates for GitHub actions.
Switch most calls to filepath.Walk to filepath.WalkDir
build: allow --no-cache and --layers so build cache can be overrided
build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0
Bump to v1.26.0-dev
build(deps): bump github.com/golangci/golangci-lint in /tests/tools

## v1.25.0 (2022-03-25)

install: drop RHEL/CentOS 7 doc

34 add.go
@@ -655,3 +655,37 @@ func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint3
}
return owner.UID, owner.GID, nil
}

// EnsureContainerPathAs creates the specified directory owned by USER
// with the file mode set to MODE.
func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
return err
}
defer func() {
if err2 := b.Unmount(); err2 != nil {
logrus.Errorf("error unmounting container: %v", err2)
}
}()

uid, gid := uint32(0), uint32(0)
if user != "" {
if uidForCopy, gidForCopy, err := b.userForCopy(mountPoint, user); err == nil {
uid = uidForCopy
gid = gidForCopy
}
}

destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)

idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
opts := copier.MkdirOptions{
ChmodNew: mode,
ChownNew: idPair,
UIDMap: destUIDMap,
GIDMap: destGIDMap,
}
return copier.Mkdir(mountPoint, filepath.Join(mountPoint, path), opts)

}

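Note: a minimal usage sketch of the Builder.EnsureContainerPathAs entry point added above, assuming a hypothetical caller that already holds a *buildah.Builder; the path, user name, and 0o755 mode below are illustrative only and are not taken from this change.

package main

import (
	"os"

	"github.com/containers/buildah"
)

// ensureWorkdirOwnedByUser creates a directory inside the working container
// owned by the named container user, using the API added in add.go above.
func ensureWorkdirOwnedByUser(b *buildah.Builder) error {
	mode := os.FileMode(0o755) // optional; passing nil keeps copier.Mkdir defaults
	return b.EnsureContainerPathAs("/home/http/public", "http", &mode)
}
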
@@ -1,3 +1,14 @@
- Changelog for v1.25.1 (2022-03-30)
* buildah: create WORKDIR with USER permissions
* vendor: update github.com/openshift/imagebuilder
* copier: attempt to open the dir before adding it
* Updated dependabot to get updates for GitHub actions.
* Switch most calls to filepath.Walk to filepath.WalkDir
* build: allow --no-cache and --layers so build cache can be overrided
* build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0
* Bump to v1.26.0-dev
* build(deps): bump github.com/golangci/golangci-lint in /tests/tools

- Changelog for v1.25.0 (2022-03-25)
* install: drop RHEL/CentOS 7 doc
* build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5

@@ -10,6 +10,7 @@ import (
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strconv"

@@ -159,10 +160,24 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade

// Start the grandparent subprocess.
cmd := unshare.Command(runUsingChrootCommand)
setPdeathsig(cmd.Cmd)
cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr
cmd.Dir = "/"
cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}

interrupted := make(chan os.Signal, 100)
cmd.Hook = func(int) error {
signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
go func() {
for receivedSignal := range interrupted {
if err := cmd.Process.Signal(receivedSignal); err != nil {
logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
}
}
}()
return nil
}

logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd)
confwg.Add(1)
go func() {

@@ -173,6 +188,8 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
err = cmd.Run()
confwg.Wait()
signal.Stop(interrupted)
close(interrupted)
if err == nil {
return conferr
}

@@ -571,6 +588,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io

// Start the parent subprocess.
cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...)
setPdeathsig(cmd.Cmd)
cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr
cmd.Dir = "/"
cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}

@@ -593,10 +611,19 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
}
cmd.OOMScoreAdj = spec.Process.OOMScoreAdj
cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
interrupted := make(chan os.Signal, 100)
cmd.Hook = func(int) error {
for _, f := range closeOnceRunning {
f.Close()
}
signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
go func() {
for receivedSignal := range interrupted {
if err := cmd.Process.Signal(receivedSignal); err != nil {
logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
}
}
}()
return nil
}

@@ -609,6 +636,8 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
}()
err = cmd.Run()
confwg.Wait()
signal.Stop(interrupted)
close(interrupted)
if err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {

@@ -792,11 +821,27 @@ func runUsingChrootExecMain() {

// Actually run the specified command.
cmd := exec.Command(args[0], args[1:]...)
setPdeathsig(cmd)
cmd.Env = options.Spec.Process.Env
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
cmd.Dir = cwd
logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH"))
if err = cmd.Run(); err != nil {
interrupted := make(chan os.Signal, 100)
if err = cmd.Start(); err != nil {
fmt.Fprintf(os.Stderr, "process failed to start with error: %v", err)
}
go func() {
for range interrupted {
if err := cmd.Process.Signal(syscall.SIGKILL); err != nil {
logrus.Infof("%v while attempting to send SIGKILL to child process", err)
}
}
}()
signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
err = cmd.Wait()
signal.Stop(interrupted)
close(interrupted)
if err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
if waitStatus.Exited() {

@@ -1419,3 +1464,11 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
return undoBinds, nil
}

// setPdeathsig sets a parent-death signal for the process
func setPdeathsig(cmd *exec.Cmd) {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
}

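Note: the same setPdeathsig helper plus a signal-forwarding goroutine is added in several places in this change. A condensed, standalone sketch of that pattern, with a placeholder child command ("sleep", "60") that is not part of this change; Pdeathsig is Linux-specific.

package main

import (
	"os"
	"os/exec"
	"os/signal"
	"syscall"
)

func main() {
	cmd := exec.Command("sleep", "60") // placeholder child process
	// Kill the child if the parent dies unexpectedly (Linux only).
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGKILL}

	interrupted := make(chan os.Signal, 100)
	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	go func() {
		// Forward any signal the parent receives to the child.
		for receivedSignal := range interrupted {
			_ = cmd.Process.Signal(receivedSignal)
		}
	}()
	_ = cmd.Wait()
	signal.Stop(interrupted)
	close(interrupted)
}
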
@@ -100,7 +100,18 @@ make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
%{_datadir}/bash-completion/completions/*

%changelog
* Fri Mar 25, 2022 Tom Sweeney <tsweeney@redhat.com> 1.26.0-dev-1
* Wed Mar 30, 2022 Tom Sweeney <tsweeney@redhat.com> 1.26.0-dev-1

* Wed Mar 30, 2022 Tom Sweeney <tsweeney@redhat.com> 1.25.1-1
- buildah: create WORKDIR with USER permissions
- vendor: update github.com/openshift/imagebuilder
- copier: attempt to open the dir before adding it
- Updated dependabot to get updates for GitHub actions.
- Switch most calls to filepath.Walk to filepath.WalkDir
- build: allow --no-cache and --layers so build cache can be overrided
- build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0
- Bump to v1.26.0-dev
- build(deps): bump github.com/golangci/golangci-lint in /tests/tools

* Fri Mar 25, 2022 Tom Sweeney <tsweeney@redhat.com> 1.25.0-1
- install: drop RHEL/CentOS 7 doc

@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"io/fs"
"io/ioutil"
"net"
"os"

@@ -1179,10 +1180,10 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
// we don't expand any of the contents that are archives
options := req.GetOptions
options.ExpandArchives = false
walkfn := func(path string, info os.FileInfo, err error) error {
walkfn := func(path string, d fs.DirEntry, err error) error {
if err != nil {
if options.IgnoreUnreadable && errorIsPermission(err) {
if info != nil && info.IsDir() {
if info != nil && d.IsDir() {
return filepath.SkipDir
}
return nil

@@ -1192,8 +1193,8 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
}
return errors.Wrapf(err, "copier: get: error reading %q", path)
}
if info.Mode()&os.ModeType == os.ModeSocket {
logrus.Warningf("copier: skipping socket %q", info.Name())
if d.Type() == os.ModeSocket {
logrus.Warningf("copier: skipping socket %q", d.Name())
return nil
}
// compute the path of this item

@@ -1216,7 +1217,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
return err
}
if skip {
if info.IsDir() {
if d.IsDir() {
// if there are no "include
// this anyway" patterns at
// all, we don't need to

@@ -1254,17 +1255,21 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
}
// if it's a symlink, read its target
symlinkTarget := ""
if info.Mode()&os.ModeType == os.ModeSymlink {
if d.Type() == os.ModeSymlink {
target, err := os.Readlink(path)
if err != nil {
return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path)
}
symlinkTarget = target
}
info, err := d.Info()
if err != nil {
return err
}
// if it's a directory and we're staying on one device, and it's on a
// different device than the one we started from, skip its contents
var ok error
if info.Mode().IsDir() && req.GetOptions.NoCrossDevice {
if d.IsDir() && req.GetOptions.NoCrossDevice {
if !sameDevice(topInfo, info) {
ok = filepath.SkipDir
}

@@ -1282,7 +1287,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
return ok
}
// walk the directory tree, checking/adding items individually
if err := filepath.Walk(item, walkfn); err != nil {
if err := filepath.WalkDir(item, walkfn); err != nil {
return errors.Wrapf(err, "copier: get: %q(%q)", queue[i], item)
}
itemsCopied++

@@ -1461,6 +1466,13 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
return errors.Wrapf(err, "error opening file for adding its contents to archive")
}
defer f.Close()
} else if hdr.Typeflag == tar.TypeDir {
// open the directory file first to make sure we can access it.
f, err = os.Open(contentPath)
if err != nil {
return errors.Wrapf(err, "error opening directory for adding its contents to archive")
}
defer f.Close()
}
// output the header
if err = tw.WriteHeader(hdr); err != nil {

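Note: for reference, a minimal standalone sketch of the filepath.Walk to filepath.WalkDir migration pattern used in copier.go above: the callback now receives an fs.DirEntry, and the full FileInfo is fetched with d.Info() only when it is actually needed. The root directory "." and the printed output are placeholders, not part of this change.

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	// WalkDir hands the callback a fs.DirEntry instead of an os.FileInfo,
	// so per-entry lstat data is gathered lazily via d.Info().
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.Type() == fs.ModeSymlink {
			// symlinks handled separately, as in copierHandlerGet above
			return nil
		}
		info, err := d.Info() // fetch the full FileInfo only when required
		if err != nil {
			return err
		}
		fmt.Println(path, info.Mode())
		return nil
	})
	if err != nil {
		panic(err)
	}
}
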
@@ -7,6 +7,7 @@ import (
"flag"
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"path"

@@ -181,12 +182,12 @@ type enumeratedFile struct {
var (
testDate = time.Unix(1485449953, 0)

uid, gid = os.Getuid(), os.Getgid()
uid = os.Getuid()

testArchiveSlice = makeArchiveSlice([]tar.Header{
{Name: "item-0", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 123, Mode: 0600, ModTime: testDate},
{Name: "item-1", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 456, Mode: 0600, ModTime: testDate},
{Name: "item-2", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 789, Mode: 0600, ModTime: testDate},
{Name: "item-0", Typeflag: tar.TypeReg, Size: 123, Mode: 0600, ModTime: testDate},
{Name: "item-1", Typeflag: tar.TypeReg, Size: 456, Mode: 0600, ModTime: testDate},
{Name: "item-2", Typeflag: tar.TypeReg, Size: 789, Mode: 0600, ModTime: testDate},
})

testArchives = []struct {

@@ -207,36 +208,36 @@ var (
name: "regular",
rootOnly: false,
headers: []tar.Header{
{Name: "file-0", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 123456789, Mode: 0600, ModTime: testDate},
{Name: "file-a", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 23, Mode: 0600, ModTime: testDate},
{Name: "file-b", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 23, Mode: 0600, ModTime: testDate},
{Name: "file-c", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "file-a", Mode: 0600, ModTime: testDate},
{Name: "file-u", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 23, Mode: cISUID | 0755, ModTime: testDate},
{Name: "file-g", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 23, Mode: cISGID | 0755, ModTime: testDate},
{Name: "file-t", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 23, Mode: cISVTX | 0755, ModTime: testDate},
{Name: "link-0", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "../file-0", Size: 123456789, Mode: 0777, ModTime: testDate},
{Name: "link-a", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "link-b", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "hlink-0", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0600, ModTime: testDate},
{Name: "hlink-a", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0600, ModTime: testDate},
{Name: "hlink-b", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "../file-b", Size: 23, Mode: 0600, ModTime: testDate},
{Name: "subdir-a", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
{Name: "subdir-a/file-n", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 108, Mode: 0660, ModTime: testDate},
{Name: "subdir-a/file-o", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 34, Mode: 0660, ModTime: testDate},
{Name: "subdir-a/file-a", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "subdir-a/file-b", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "subdir-a/file-c", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "/file-c", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "subdir-b", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
{Name: "subdir-b/file-n", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 216, Mode: 0660, ModTime: testDate},
{Name: "subdir-b/file-o", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 45, Mode: 0660, ModTime: testDate},
{Name: "subdir-c", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
{Name: "subdir-c/file-n", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 432, Mode: 0666, ModTime: testDate},
{Name: "subdir-c/file-o", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 56, Mode: 0666, ModTime: testDate},
{Name: "subdir-d", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
{Name: "subdir-d/hlink-0", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0600, ModTime: testDate},
{Name: "subdir-d/hlink-a", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0600, ModTime: testDate},
{Name: "subdir-d/hlink-b", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0600, ModTime: testDate},
{Name: "archive-a", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 0, Mode: 0600, ModTime: testDate},
{Name: "file-0", Typeflag: tar.TypeReg, Size: 123456789, Mode: 0600, ModTime: testDate},
{Name: "file-a", Typeflag: tar.TypeReg, Size: 23, Mode: 0600, ModTime: testDate},
{Name: "file-b", Typeflag: tar.TypeReg, Size: 23, Mode: 0600, ModTime: testDate},
{Name: "file-c", Typeflag: tar.TypeLink, Linkname: "file-a", Mode: 0600, ModTime: testDate},
{Name: "file-u", Typeflag: tar.TypeReg, Size: 23, Mode: cISUID | 0755, ModTime: testDate},
{Name: "file-g", Typeflag: tar.TypeReg, Size: 23, Mode: cISGID | 0755, ModTime: testDate},
{Name: "file-t", Typeflag: tar.TypeReg, Size: 23, Mode: cISVTX | 0755, ModTime: testDate},
{Name: "link-0", Typeflag: tar.TypeSymlink, Linkname: "../file-0", Size: 123456789, Mode: 0777, ModTime: testDate},
{Name: "link-a", Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "link-b", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "hlink-0", Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0600, ModTime: testDate},
{Name: "hlink-a", Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0600, ModTime: testDate},
{Name: "hlink-b", Typeflag: tar.TypeLink, Linkname: "../file-b", Size: 23, Mode: 0600, ModTime: testDate},
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
{Name: "subdir-a/file-n", Typeflag: tar.TypeReg, Size: 108, Mode: 0660, ModTime: testDate},
{Name: "subdir-a/file-o", Typeflag: tar.TypeReg, Size: 34, Mode: 0660, ModTime: testDate},
{Name: "subdir-a/file-a", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "subdir-a/file-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "subdir-a/file-c", Typeflag: tar.TypeSymlink, Linkname: "/file-c", Size: 23, Mode: 0777, ModTime: testDate},
{Name: "subdir-b", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
{Name: "subdir-b/file-n", Typeflag: tar.TypeReg, Size: 216, Mode: 0660, ModTime: testDate},
{Name: "subdir-b/file-o", Typeflag: tar.TypeReg, Size: 45, Mode: 0660, ModTime: testDate},
{Name: "subdir-c", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
{Name: "subdir-c/file-n", Typeflag: tar.TypeReg, Size: 432, Mode: 0666, ModTime: testDate},
{Name: "subdir-c/file-o", Typeflag: tar.TypeReg, Size: 56, Mode: 0666, ModTime: testDate},
{Name: "subdir-d", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
{Name: "subdir-d/hlink-0", Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0600, ModTime: testDate},
{Name: "subdir-d/hlink-a", Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0600, ModTime: testDate},
{Name: "subdir-d/hlink-b", Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0600, ModTime: testDate},
{Name: "archive-a", Typeflag: tar.TypeReg, Size: 0, Mode: 0600, ModTime: testDate},
},
contents: map[string][]byte{
"archive-a": testArchiveSlice,

@@ -405,8 +406,8 @@ var (
name: "devices",
rootOnly: true,
headers: []tar.Header{
{Name: "char-dev", Uid: uid, Gid: gid, Typeflag: tar.TypeChar, Devmajor: 0, Devminor: 0, Mode: 0600, ModTime: testDate},
{Name: "blk-dev", Uid: uid, Gid: gid, Typeflag: tar.TypeBlock, Devmajor: 0, Devminor: 0, Mode: 0600, ModTime: testDate},
{Name: "char-dev", Typeflag: tar.TypeChar, Devmajor: 0, Devminor: 0, Mode: 0600, ModTime: testDate},
{Name: "blk-dev", Typeflag: tar.TypeBlock, Devmajor: 0, Devminor: 0, Mode: 0600, ModTime: testDate},
},
},
}

@@ -501,8 +502,8 @@ func testPut(t *testing.T) {
require.NoErrorf(t, err, "error extracting archive %q to directory %q", testArchives[i].name, tmp)

var found []string
err = filepath.Walk(tmp, func(path string, info os.FileInfo, err error) error {
if info == nil || err != nil {
err = filepath.WalkDir(tmp, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
rel, err := filepath.Rel(tmp, path)

@@ -847,32 +848,32 @@ func testGetMultiple(t *testing.T) {
{
name: "regular",
headers: []tar.Header{
{Name: "file-0", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 123456789, Mode: 0600},
{Name: "file-a", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 23, Mode: 0600},
{Name: "file-b", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 23, Mode: 0600},
{Name: "link-a", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0600},
{Name: "link-c", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "subdir-c", Mode: 0700, ModTime: testDate},
{Name: "archive-a", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 0, Mode: 0600},
{Name: "non-archive-a", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 1199, Mode: 0600},
{Name: "hlink-0", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0600},
{Name: "something-a", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 34, Mode: 0600},
{Name: "subdir-a", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-a/file-n", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 108, Mode: 0660},
{Name: "subdir-a/file-o", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 45, Mode: 0660},
{Name: "subdir-a/file-a", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0600},
{Name: "subdir-a/file-b", Uid: uid, Gid: gid, Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0600},
{Name: "subdir-a/file-c", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 56, Mode: 0600},
{Name: "subdir-b", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-b/file-n", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 216, Mode: 0660},
{Name: "subdir-b/file-o", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 67, Mode: 0660},
{Name: "subdir-c", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-c/file-p", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 432, Mode: 0666},
{Name: "subdir-c/file-q", Uid: uid, Gid: gid, Typeflag: tar.TypeReg, Size: 78, Mode: 0666},
{Name: "subdir-d", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-d/hlink-0", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0600},
{Name: "subdir-e", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-e/subdir-f", Uid: uid, Gid: gid, Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-e/subdir-f/hlink-b", Uid: uid, Gid: gid, Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0600},
{Name: "file-0", Typeflag: tar.TypeReg, Size: 123456789, Mode: 0600},
{Name: "file-a", Typeflag: tar.TypeReg, Size: 23, Mode: 0600},
{Name: "file-b", Typeflag: tar.TypeReg, Size: 23, Mode: 0600},
{Name: "link-a", Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0600},
{Name: "link-c", Typeflag: tar.TypeSymlink, Linkname: "subdir-c", Mode: 0700, ModTime: testDate},
{Name: "archive-a", Typeflag: tar.TypeReg, Size: 0, Mode: 0600},
{Name: "non-archive-a", Typeflag: tar.TypeReg, Size: 1199, Mode: 0600},
{Name: "hlink-0", Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0600},
{Name: "something-a", Typeflag: tar.TypeReg, Size: 34, Mode: 0600},
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-a/file-n", Typeflag: tar.TypeReg, Size: 108, Mode: 0660},
{Name: "subdir-a/file-o", Typeflag: tar.TypeReg, Size: 45, Mode: 0660},
{Name: "subdir-a/file-a", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0600},
{Name: "subdir-a/file-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0600},
{Name: "subdir-a/file-c", Typeflag: tar.TypeReg, Size: 56, Mode: 0600},
{Name: "subdir-b", Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-b/file-n", Typeflag: tar.TypeReg, Size: 216, Mode: 0660},
{Name: "subdir-b/file-o", Typeflag: tar.TypeReg, Size: 67, Mode: 0660},
{Name: "subdir-c", Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-c/file-p", Typeflag: tar.TypeReg, Size: 432, Mode: 0666},
{Name: "subdir-c/file-q", Typeflag: tar.TypeReg, Size: 78, Mode: 0666},
{Name: "subdir-d", Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-d/hlink-0", Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0600},
{Name: "subdir-e", Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-e/subdir-f", Typeflag: tar.TypeDir, Mode: 0700},
{Name: "subdir-e/subdir-f/hlink-b", Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0600},
},
contents: map[string][]byte{
"archive-a": testArchiveSlice,

@@ -1561,8 +1562,8 @@ func testMkdir(t *testing.T) {
root := dir
options := MkdirOptions{ChownNew: &idtools.IDPair{UID: os.Getuid(), GID: os.Getgid()}}
var beforeNames, afterNames []string
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info == nil || err != nil {
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
rel, err := filepath.Rel(dir, path)

@@ -1575,8 +1576,8 @@ func testMkdir(t *testing.T) {
require.NoErrorf(t, err, "error walking directory to catalog pre-Mkdir contents: %v", err)
err = Mkdir(root, testCase.create, options)
require.NoErrorf(t, err, "error creating directory %q under %q with Mkdir: %v", testCase.create, root, err)
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info == nil || err != nil {
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
rel, err := filepath.Rel(dir, path)

@@ -1777,8 +1778,8 @@ func testRemove(t *testing.T) {
root := dir
options := RemoveOptions{All: testCase.all}
beforeNames := make(map[string]struct{})
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info == nil || err != nil {
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
rel, err := filepath.Rel(dir, path)

@@ -1796,8 +1797,8 @@ func testRemove(t *testing.T) {
}
require.NoErrorf(t, err, "error removing item %q under %q with Remove: %v", testCase.remove, root, err)
afterNames := make(map[string]struct{})
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info == nil || err != nil {
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
rel, err := filepath.Rel(dir, path)

4 go.mod
@@ -23,11 +23,11 @@ require (
github.com/onsi/gomega v1.19.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
github.com/opencontainers/runc v1.1.0
github.com/opencontainers/runc v1.1.1
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.10.0
github.com/openshift/imagebuilder v1.2.2
github.com/openshift/imagebuilder v1.2.3
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.1 // indirect
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921

7 go.sum
@@ -860,8 +860,9 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runc v1.1.1 h1:PJ9DSs2sVwE0iVr++pAHE6QkS9tzcVWozlPifdwMgrU=
github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=

@@ -878,8 +879,8 @@ github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xA
github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/openshift/imagebuilder v1.2.2 h1:++jWWMkTVJKP2MIjTPaTk2MqwWIOYYlDaQbZyLlLBh0=
github.com/openshift/imagebuilder v1.2.2/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
github.com/openshift/imagebuilder v1.2.3 h1:jvA7mESJdclRKkTe3Yl6UWlliFNVW6mLY8RI+Rrfhfo=
github.com/openshift/imagebuilder v1.2.3/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=

@@ -1570,5 +1570,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}

func (s *StageExecutor) EnsureContainerPath(path string) error {
return copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{})
return s.builder.EnsureContainerPathAs(path, "", nil)
}

func (s *StageExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
return s.builder.EnsureContainerPathAs(path, user, mode)
}

120 run_linux.go
@@ -12,6 +12,7 @@ import (
"net"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strconv"

@@ -191,16 +192,19 @@ func (b *Builder) Run(command []string, options RunOptions) error {
return err
}

// Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
rootUID, rootGID, err := util.GetHostRootIDs(spec)
if err != nil {
return err
uid, gid := spec.Process.User.UID, spec.Process.User.GID
if spec.Linux != nil {
uid, gid, err = util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, uid, gid)
if err != nil {
return err
}
}
rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}

idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}

mode := os.FileMode(0755)
coptions := copier.MkdirOptions{
ChownNew: rootIDPair,
ChownNew: idPair,
ChmodNew: &mode,
}
if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil {

@@ -211,6 +215,13 @@ func (b *Builder) Run(command []string, options RunOptions) error {
namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...)
volumes := b.Volumes()

// Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
rootUID, rootGID, err := util.GetHostRootIDs(spec)
if err != nil {
return err
}
rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}

if !options.NoHosts && !contains(volumes, "/etc/hosts") {
hostFile, err := b.generateHosts(path, spec.Hostname, b.CommonBuildOpts.AddHost, rootIDPair)
if err != nil {

@@ -290,9 +301,7 @@ rootless=%d
case define.IsolationOCI:
var moreCreateArgs []string
if options.NoPivot {
moreCreateArgs = []string{"--no-pivot"}
} else {
moreCreateArgs = nil
moreCreateArgs = append(moreCreateArgs, "--no-pivot")
}
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path))
case IsolationChroot:

@@ -829,7 +838,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
if err = unix.Pipe(finishCopy); err != nil {
return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio")
}
finishedCopy := make(chan struct{})
finishedCopy := make(chan struct{}, 1)
var pargs []string
if spec.Process != nil {
pargs = spec.Process.Args

@@ -885,22 +894,27 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
pidFile := filepath.Join(bundlePath, "pid")
args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName)
create := exec.Command(runtime, args...)
setPdeathsig(create)
create.Dir = bundlePath
stdin, stdout, stderr := getCreateStdio()
create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr
if create.SysProcAttr == nil {
create.SysProcAttr = &syscall.SysProcAttr{}
}

args = append(options.Args, "start", containerName)
start := exec.Command(runtime, args...)
setPdeathsig(start)
start.Dir = bundlePath
start.Stderr = os.Stderr

args = append(options.Args, "kill", containerName)
kill := exec.Command(runtime, args...)
kill.Dir = bundlePath
kill.Stderr = os.Stderr
kill := func(signal string) *exec.Cmd {
args := append(options.Args, "kill", containerName)
if signal != "" {
args = append(args, signal)
}
kill := exec.Command(runtime, args...)
kill.Dir = bundlePath
kill.Stderr = os.Stderr
return kill
}

args = append(options.Args, "delete", containerName)
del := exec.Command(runtime, args...)

@@ -981,13 +995,23 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
}
defer func() {
if atomic.LoadUint32(&stopped) == 0 {
if err2 := kill.Run(); err2 != nil {
options.Logger.Infof("error from %s stopping container: %v", runtime, err2)
if err := kill("").Run(); err != nil {
options.Logger.Infof("error from %s stopping container: %v", runtime, err)
}
atomic.StoreUint32(&stopped, 1)
}
}()

// Wait for the container to exit.
interrupted := make(chan os.Signal, 100)
go func() {
for range interrupted {
if err := kill("SIGKILL").Run(); err != nil {
logrus.Errorf("%v sending SIGKILL", err)
}
}
}()
signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
for {
now := time.Now()
var state specs.State

@@ -1026,6 +1050,8 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
break
}
}
signal.Stop(interrupted)
close(interrupted)

// Close the writing end of the stop-handling-stdio notification pipe.
unix.Close(finishCopy[1])

@@ -1112,6 +1138,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
}

cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", strconv.Itoa(pid), "tap0")
setPdeathsig(cmd)
cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil
cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW}

@@ -1229,6 +1256,7 @@ func runCopyStdio(logger *logrus.Logger, stdio *sync.WaitGroup, copyPipes bool,
}
stdio.Done()
finishedCopy <- struct{}{}
close(finishedCopy)
}()
// Map describing where data on an incoming descriptor should go.
relayMap := make(map[int]int)

@@ -2227,6 +2255,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
}
cmd := reexec.Command(runUsingRuntimeCommand)
setPdeathsig(cmd)
cmd.Dir = bundlePath
cmd.Stdin = options.Stdin
if cmd.Stdin == nil {

@@ -2255,23 +2284,23 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
}()

// create network configuration pipes
var containerCreateR, containerCreateW *os.File
var containerStartR, containerStartW *os.File
var containerCreateR, containerCreateW fileCloser
var containerStartR, containerStartW fileCloser
if configureNetwork {
containerCreateR, containerCreateW, err = os.Pipe()
containerCreateR.file, containerCreateW.file, err = os.Pipe()
if err != nil {
return errors.Wrapf(err, "error creating container create pipe")
}
defer containerCreateR.Close()
defer containerCreateW.Close()

containerStartR, containerStartW, err = os.Pipe()
containerStartR.file, containerStartW.file, err = os.Pipe()
if err != nil {
return errors.Wrapf(err, "error creating container create pipe")
}
defer containerStartR.Close()
defer containerStartW.Close()
cmd.ExtraFiles = []*os.File{containerCreateW, containerStartR}
cmd.ExtraFiles = []*os.File{containerCreateW.file, containerStartR.file}
}

cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)

@@ -2281,8 +2310,20 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
return errors.Wrapf(err, "error while starting runtime")
}

interrupted := make(chan os.Signal, 100)
go func() {
for receivedSignal := range interrupted {
if err := cmd.Process.Signal(receivedSignal); err != nil {
logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
}
}
}()
signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

if configureNetwork {
if err := waitForSync(containerCreateR); err != nil {
// we already passed the fd to the child, now close the writer so we do not hang if the child closes it
containerCreateW.Close()
if err := waitForSync(containerCreateR.file); err != nil {
// we do not want to return here since we want to capture the exit code from the child via cmd.Wait()
// close the pipes here so that the child will not hang forever
containerCreateR.Close()

@@ -2308,16 +2349,19 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
}

logrus.Debug("network namespace successfully setup, send start message to child")
_, err = containerStartW.Write([]byte{1})
_, err = containerStartW.file.Write([]byte{1})
if err != nil {
return err
}
}
}

if err := cmd.Wait(); err != nil {
return errors.Wrapf(err, "error while running runtime")
}
confwg.Wait()
signal.Stop(interrupted)
close(interrupted)
if err == nil {
return conferr
}

@@ -2327,6 +2371,22 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
return err
}

// fileCloser is a helper struct to prevent closing the file twice in the code
// users must call (fileCloser).Close() and not fileCloser.File.Close()
type fileCloser struct {
file *os.File
closed bool
}

func (f *fileCloser) Close() {
if !f.closed {
if err := f.file.Close(); err != nil {
logrus.Errorf("failed to close file: %v", err)
}
f.closed = true
}
}

// waitForSync waits for a maximum of 4 minutes to read something from the file
func waitForSync(pipeR *os.File) error {
if err := pipeR.SetDeadline(time.Now().Add(4 * time.Minute)); err != nil {

@@ -2899,3 +2959,11 @@ func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string)
}
return netInt, nil
}

// setPdeathsig sets a parent-death signal for the process
func setPdeathsig(cmd *exec.Cmd) {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
}

@@ -3157,6 +3157,23 @@ _EOF

}

@test "bud - invalid runtime flags test" {
skip_if_no_runtime
skip_if_chroot

_prefetch alpine

mytmpdir=${TESTDIR}/my-dir
mkdir -p ${mytmpdir}
cat > $mytmpdir/Containerfile << _EOF
from alpine
run echo hello
_EOF

run_buildah 1 build --signature-policy ${TESTSDIR}/policy.json --runtime-flag invalidflag -t build_test $mytmpdir .
assert "$output" =~ ".*invalidflag" "failed when passing undefined flags to the runtime"
}

@test "bud with --add-host" {
skip_if_no_runtime

@@ -4003,3 +4020,36 @@ _EOF
expect_output --substring "10.88."
fi
}

@test "bud WORKDIR owned by USER" {
_prefetch alpine
target=alpine-image
ctr=alpine-ctr
run_buildah build --signature-policy ${TESTSDIR}/policy.json -t ${target} ${TESTSDIR}/bud/workdir-user
expect_output --substring "1000:1000 /home/http/public"
}

@test "build interruption" {
skip_if_no_runtime

_prefetch alpine

mkfifo ${TESTDIR}/pipe
# start the build running in the background - don't use the function wrapper because that sets '$!' to a value that's not what we want
${BUILDAH_BINARY} ${BUILDAH_REGISTRY_OPTS} ${ROOTDIR_OPTS} --signature-policy ${TESTSDIR}/policy.json build ${TESTSDIR}/bud/long-sleep > ${TESTDIR}/pipe 2>&1 &
buildah_pid="${!}"
echo buildah is pid ${buildah_pid}
# save what's written to the fifo to a plain file
coproc cat ${TESTDIR}/pipe > ${TESTDIR}/log
cat_pid="${COPROC_PID}"
echo cat is pid ${cat_pid}
# kill the buildah process early
sleep 30
kill -s SIGINT "${buildah_pid}"
# wait for output to stop getting written from anywhere
wait "${buildah_pid}" "${cat_pid}"
echo log:
cat ${TESTDIR}/log
echo checking:
! grep 'not fully killed' ${TESTDIR}/log
}

@@ -0,0 +1,4 @@
FROM alpine
# this can be a long long time, since the test should kill it long before this has elapsed
RUN sleep 300
RUN echo not fully killed

@@ -7,3 +7,6 @@ RUN echo "StatSomefileResult" && stat -c '%u:%g' /somefile
COPY somedir /somedir
RUN echo "StatSomedirResult" && stat -c '%u:%g' /somedir
RUN echo "StatSomeotherfileResult" && stat -c '%u:%g %a' /somedir/someotherfile
USER guest
WORKDIR /new-workdir
RUN echo "StatNewWorkdir" && stat -c '%U:%G' $PWD

@@ -0,0 +1,6 @@
FROM alpine
RUN adduser -D http -h /home/http
USER http
WORKDIR /home/http/public
RUN stat -c '%u:%g %n' $PWD
RUN touch foobar

@@ -2959,4 +2959,16 @@ var internalTestCases = []testCase{
dockerfile: "Dockerfile.4",
fsSkip: []string{"(dir):tree:mtime"},
},

{
name: "workdir-owner", // from issue #3620
dockerfileContents: strings.Join([]string{
`FROM alpine`,
`USER daemon`,
`WORKDIR /created/directory`,
`RUN ls /created`,
}, "\n"),
fsSkip: []string{"(dir):created:mtime", "(dir):created:(dir):directory:mtime"},
dockerUseBuildKit: true,
},
}

@@ -262,9 +262,11 @@ idmapping_check_permission() {
# Check that if we copy a directory into the container, its contents get the right permissions.
output_dir_stat="$(grep -A1 'StatSomedirResult' <<< "$output" | tail -n1)"
output_otherfile_stat="$(grep -A1 'StatSomeotherfileResult' <<< "$output" | tail -n1)"
output_workdir_stat="$(grep -A1 'StatNewWorkdir' <<< "$output" | tail -n1)"
# bud strips suid.
idmapping_check_permission "$output_file_stat" "$output_dir_stat"
expect_output --from="${output_otherfile_stat}" "0:0 700" "stat(someotherfile), in bud test"
expect_output --from="${output_workdir_stat}" "guest:users" "stat(new-workdir), in bud test"
done
}

@@ -44,6 +44,7 @@ type Run struct {
type Executor interface {
Preserve(path string) error
EnsureContainerPath(path string) error
EnsureContainerPathAs(path, user string, mode *os.FileMode) error
Copy(excludes []string, copies ...Copy) error
Run(run Run, config docker.Config) error
UnrecognizedInstruction(step *Step) error

@@ -61,6 +62,15 @@ func (logExecutor) EnsureContainerPath(path string) error {
return nil
}

func (logExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
if mode != nil {
log.Printf("ENSURE %s AS %q with MODE=%q", path, user, *mode)
} else {
log.Printf("ENSURE %s AS %q", path, user)
}
return nil
}

func (logExecutor) Copy(excludes []string, copies ...Copy) error {
for _, c := range copies {
log.Printf("COPY %v -> %s (from:%s download:%t), chown: %s, chmod %s", c.Src, c.Dest, c.From, c.Download, c.Chown, c.Chmod)

@@ -88,6 +98,10 @@ func (noopExecutor) EnsureContainerPath(path string) error {
return nil
}

func (noopExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
return nil
}

func (noopExecutor) Copy(excludes []string, copies ...Copy) error {
return nil
}

@@ -378,7 +392,7 @@ func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error {
}

if len(b.RunConfig.WorkingDir) > 0 {
if err := exec.EnsureContainerPath(b.RunConfig.WorkingDir); err != nil {
if err := exec.EnsureContainerPathAs(b.RunConfig.WorkingDir, b.RunConfig.User, nil); err != nil {
return err
}
}

@@ -3,6 +3,7 @@ package dockerclient
import (
"archive/tar"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"

@@ -21,6 +22,7 @@ import (
)

var isArchivePath = archive.IsArchivePath
var dstNeedsToBeDirectoryError = errors.New("copying would overwrite content that was already copied; destination needs to be a directory")

// TransformFileFunc is given a chance to transform an arbitrary input file.
type TransformFileFunc func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error)

@@ -48,7 +50,7 @@ func FilterArchive(r io.Reader, w io.Writer, fn TransformFileFunc) error {
var body io.Reader = tr
name := h.Name
data, ok, skip, err := fn(h, tr)
klog.V(6).Infof("Transform %s -> %s: data=%t ok=%t skip=%t err=%v", name, h.Name, data != nil, ok, skip, err)
klog.V(6).Infof("Transform %s(0%o) -> %s: data=%t ok=%t skip=%t err=%v", name, h.Mode, h.Name, data != nil, ok, skip, err)
if err != nil {
return err
}

@@ -183,7 +185,7 @@ func archiveFromDisk(directory string, src, dst string, allowDownload bool, excl
directory = filepath.Dir(directory)
}

options, err := archiveOptionsFor(infos, dst, excludes, check)
options, err := archiveOptionsFor(directory, infos, dst, excludes, check)
if err != nil {
return nil, nil, err
}

@@ -305,7 +307,7 @@ func archiveFromFile(file string, src, dst string, excludes []string, check Dire
pw.CloseWithError(err)
}

mapper, _, err := newArchiveMapper(src, dst, excludes, false, true, check, refetch)
mapper, _, err := newArchiveMapper(src, dst, excludes, false, true, check, refetch, true)
if err != nil {
return nil, nil, err
}

@@ -318,7 +320,7 @@ func archiveFromFile(file string, src, dst string, excludes []string, check Dire
r, err := transformArchive(f, true, mapper.Filter)
cc := newCloser(func() error {
err := f.Close()
if !mapper.foundItems {
if mapper.foundItems == 0 {
return fmt.Errorf("%s: %w", src, os.ErrNotExist)
}
return err

@@ -326,15 +328,15 @@ func archiveFromFile(file string, src, dst string, excludes []string, check Dire
return r, cc, err
}

func archiveFromContainer(in io.Reader, src, dst string, excludes []string, check DirectoryCheck, refetch FetchArchiveFunc) (io.ReadCloser, string, error) {
mapper, archiveRoot, err := newArchiveMapper(src, dst, excludes, true, false, check, refetch)
func archiveFromContainer(in io.Reader, src, dst string, excludes []string, check DirectoryCheck, refetch FetchArchiveFunc, assumeDstIsDirectory bool) (io.ReadCloser, string, error) {
mapper, archiveRoot, err := newArchiveMapper(src, dst, excludes, true, false, check, refetch, assumeDstIsDirectory)
if err != nil {
return nil, "", err
}

r, err := transformArchive(in, false, mapper.Filter)
rc := readCloser{Reader: r, Closer: newCloser(func() error {
if !mapper.foundItems {
if mapper.foundItems == 0 {
return fmt.Errorf("%s: %w", src, os.ErrNotExist)
}
return nil

@@ -365,7 +367,7 @@ func transformArchive(r io.Reader, compressed bool, fn TransformFileFunc) (io.Re
// a (dir) -> test/
// a (file) -> test/
//
func archivePathMapper(src, dst string, isDestDir bool) (fn func(name string, isDir bool) (string, bool)) {
func archivePathMapper(src, dst string, isDestDir bool) (fn func(itemCount *int, name string, isDir bool) (string, bool, error)) {
srcPattern := filepath.Clean(src)
if srcPattern == "." {
srcPattern = "*"

@@ -376,33 +378,38 @@ func archivePathMapper(src, dst string, isDestDir bool) (fn func(name string, is
 
     // no wildcards
     if !containsWildcards(pattern) {
-        return func(name string, isDir bool) (string, bool) {
+        return func(itemCount *int, name string, isDir bool) (string, bool, error) {
             // when extracting from the working directory, Docker prefaces with ./
             if strings.HasPrefix(name, "."+string(filepath.Separator)) {
                 name = name[2:]
             }
             if name == srcPattern {
-                if isDir {
-                    return "", false
+                if isDir { // the source is a directory: this directory; skip it
+                    return "", false, nil
                 }
-                if isDestDir {
-                    return filepath.Join(dst, filepath.Base(name)), true
+                if isDestDir { // the destination is a directory, put this under it
+                    return filepath.Join(dst, filepath.Base(name)), true, nil
                 }
-                return dst, true
+                // the source is a non-directory: copy to the destination's name
+                if itemCount != nil && *itemCount != 0 { // but we've already written something there
+                    return "", false, dstNeedsToBeDirectoryError // tell the caller to start over
+                }
+                return dst, true, nil
             }
 
+            // source is a directory, this is under it; put this under the destination directory
             remainder := strings.TrimPrefix(name, srcPattern+string(filepath.Separator))
             if remainder == name {
-                return "", false
+                return "", false, nil
             }
-            return filepath.Join(dst, remainder), true
+            return filepath.Join(dst, remainder), true, nil
         }
     }
 
     // root with pattern
     prefix := filepath.Dir(srcPattern)
     if prefix == "." {
-        return func(name string, isDir bool) (string, bool) {
+        return func(itemCount *int, name string, isDir bool) (string, bool, error) {
             // match only on the first segment under the prefix
             var firstSegment = name
             if i := strings.Index(name, string(filepath.Separator)); i != -1 {
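The rewritten mapper threads an item counter through each callback so that, when a second item maps onto a destination that was assumed to be a single file, it can hand back dstNeedsToBeDirectoryError and let the caller restart with the destination treated as a directory. A minimal standalone sketch of that signalling pattern follows; the names below are illustrative, not this package's API.

package main

import (
	"errors"
	"fmt"
)

// errNeedsDir mirrors the idea behind dstNeedsToBeDirectoryError: a second
// item arrived for a destination we assumed was a single file.
var errNeedsDir = errors.New("destination needs to be a directory")

// mapToDst maps one source item to the destination, consulting itemCount to
// learn whether something has already been written there.
func mapToDst(itemCount *int, name, dst string, dstIsDir bool) (string, error) {
	if dstIsDir {
		return dst + "/" + name, nil
	}
	if itemCount != nil && *itemCount != 0 {
		return "", errNeedsDir // caller should retry with dstIsDir=true
	}
	return dst, nil
}

func main() {
	items := []string{"a.txt", "b.txt"}
	count := 0
	for _, it := range items {
		target, err := mapToDst(&count, it, "/opt/out", false)
		if err != nil {
			fmt.Println("restarting with a directory destination:", err)
			return
		}
		count++
		fmt.Println(it, "->", target)
	}
}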
@@ -410,18 +417,24 @@ func archivePathMapper(src, dst string, isDestDir bool) (fn func(name string, is
             }
             ok, _ := filepath.Match(pattern, firstSegment)
             if !ok {
-                return "", false
+                return "", false, nil
             }
-            return filepath.Join(dst, name), true
+            if !isDestDir && !isDir { // the destination is not a directory, put this right there
+                if itemCount != nil && *itemCount != 0 { // but we've already written something there
+                    return "", false, dstNeedsToBeDirectoryError // tell the caller to start over
+                }
+                return dst, true, nil
+            }
+            return filepath.Join(dst, name), true, nil
         }
     }
     prefix += string(filepath.Separator)
 
     // nested with pattern
-    return func(name string, isDir bool) (string, bool) {
+    return func(_ *int, name string, isDir bool) (string, bool, error) {
         remainder := strings.TrimPrefix(name, prefix)
         if remainder == name {
-            return "", false
+            return "", false, nil
         }
         // match only on the first segment under the prefix
         var firstSegment = remainder
@@ -430,31 +443,31 @@ func archivePathMapper(src, dst string, isDestDir bool) (fn func(name string, is
         }
         ok, _ := filepath.Match(pattern, firstSegment)
         if !ok {
-            return "", false
+            return "", false, nil
         }
-        return filepath.Join(dst, remainder), true
+        return filepath.Join(dst, remainder), true, nil
     }
 }
 
 type archiveMapper struct {
     exclude *fileutils.PatternMatcher
-    rename func(name string, isDir bool) (string, bool)
+    rename func(itemCount *int, name string, isDir bool) (string, bool, error)
     prefix string
     dst string
     resetDstMode bool
     resetOwners bool
-    foundItems bool
+    foundItems int
     refetch FetchArchiveFunc
     renameLinks map[string]string
 }
 
-func newArchiveMapper(src, dst string, excludes []string, resetDstMode, resetOwners bool, check DirectoryCheck, refetch FetchArchiveFunc) (*archiveMapper, string, error) {
+func newArchiveMapper(src, dst string, excludes []string, resetDstMode, resetOwners bool, check DirectoryCheck, refetch FetchArchiveFunc, assumeDstIsDirectory bool) (*archiveMapper, string, error) {
     ex, err := fileutils.NewPatternMatcher(excludes)
     if err != nil {
         return nil, "", err
     }
 
-    isDestDir := strings.HasSuffix(dst, "/") || path.Base(dst) == "."
+    isDestDir := strings.HasSuffix(dst, "/") || path.Base(dst) == "." || strings.HasSuffix(src, "/") || path.Base(src) == "." || assumeDstIsDirectory
     dst = path.Clean(dst)
     if !isDestDir && check != nil {
         isDir, err := check.IsDirectory(dst)
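newArchiveMapper now also treats the destination as a directory when the source itself is spelled like a directory, or when the caller already expects multiple sources (assumeDstIsDirectory). A small sketch of that heuristic using only the standard library; the helper name is made up for illustration.

package main

import (
	"fmt"
	"path"
	"strings"
)

// destIsDirectory mirrors the kind of check newArchiveMapper performs: the
// destination is a directory if it is spelled like one, if the source is
// spelled like a directory, or if the caller forces the assumption.
func destIsDirectory(src, dst string, assume bool) bool {
	return strings.HasSuffix(dst, "/") || path.Base(dst) == "." ||
		strings.HasSuffix(src, "/") || path.Base(src) == "." ||
		assume
}

func main() {
	fmt.Println(destIsDirectory("app/", "/deploy", false))    // source looks like a directory
	fmt.Println(destIsDirectory("app.txt", "/deploy", false)) // single file to a single name
	fmt.Println(destIsDirectory("app.txt", "/deploy", true))  // multiple sources expected
}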
@@ -518,7 +531,10 @@ func (m *archiveMapper) Filter(h *tar.Header, r io.Reader) ([]byte, bool, bool,
 
     // skip a file if it doesn't match the src
     isDir := h.Typeflag == tar.TypeDir
-    newName, ok := m.rename(h.Name, isDir)
+    newName, ok, err := m.rename(&m.foundItems, h.Name, isDir)
+    if err != nil {
+        return nil, false, true, err
+    }
     if !ok {
         return nil, false, true, nil
     }
@@ -530,7 +546,7 @@ func (m *archiveMapper) Filter(h *tar.Header, r io.Reader) ([]byte, bool, bool,
         return nil, false, true, nil
     }
 
-    m.foundItems = true
+    m.foundItems++
 
     h.Name = newName
 
@@ -556,7 +572,10 @@ func (m *archiveMapper) Filter(h *tar.Header, r io.Reader) ([]byte, bool, bool,
     if !needReplacement {
         linkName = strings.TrimPrefix(strings.TrimPrefix(linkName, m.prefix), "/")
         var ok bool
-        if newTarget, ok = m.rename(linkName, false); !ok || newTarget == "." {
+        if newTarget, ok, err = m.rename(nil, linkName, false); err != nil {
+            return nil, false, true, err
+        }
+        if !ok || newTarget == "." {
             // the link target wasn't passed along
             needReplacement = true
         }
@@ -616,7 +635,7 @@ func (m *archiveMapper) Filter(h *tar.Header, r io.Reader) ([]byte, bool, bool,
     return nil, false, false, nil
 }
 
-func archiveOptionsFor(infos []CopyInfo, dst string, excludes []string, check DirectoryCheck) (*archive.TarOptions, error) {
+func archiveOptionsFor(directory string, infos []CopyInfo, dst string, excludes []string, check DirectoryCheck) (*archive.TarOptions, error) {
     dst = trimLeadingPath(dst)
     dstIsDir := strings.HasSuffix(dst, "/") || dst == "." || dst == "/" || strings.HasSuffix(dst, "/.")
     dst = trimTrailingSlash(dst)
@@ -639,6 +658,22 @@ func archiveOptionsFor(infos []CopyInfo, dst string, excludes []string, check Di
         return options, nil
     }
 
+    if !dstIsDir {
+        for _, info := range infos {
+            if ok, _ := pm.Matches(info.Path); ok {
+                continue
+            }
+            infoPath := info.Path
+            if directory != "" {
+                infoPath = filepath.Join(directory, infoPath)
+            }
+            if isArchivePath(infoPath) {
+                dstIsDir = true
+                break
+            }
+        }
+    }
+
     for _, info := range infos {
         if ok, _ := pm.Matches(info.Path); ok {
             continue
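archiveOptionsFor now receives the context directory so it can test whether any source is itself an archive; an archive handed to ADD gets unpacked, so the destination has to be treated as a directory even when it is not spelled like one. A rough standalone approximation of that check, sniffing only for a gzip magic number rather than using the vendored archive.IsArchivePath helper; paths and names are invented for the example.

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// looksLikeArchive is a stand-in for archive.IsArchivePath: it only checks
// for a gzip magic number, which is enough to illustrate the decision, not
// to replicate the real detection.
func looksLikeArchive(p string) bool {
	f, err := os.Open(p)
	if err != nil {
		return false
	}
	defer f.Close()
	magic := make([]byte, 2)
	if _, err := io.ReadFull(f, magic); err != nil {
		return false
	}
	return bytes.Equal(magic, []byte{0x1f, 0x8b})
}

func main() {
	dstIsDir := false
	sources := []string{"layer.tar.gz", "config.json"}
	for _, src := range sources {
		if looksLikeArchive(filepath.Join("/build/context", src)) {
			// an archive will be expanded under dst, so dst must be a directory
			dstIsDir = true
			break
		}
	}
	fmt.Println("treat destination as directory:", dstIsDir)
}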
@@ -682,10 +682,26 @@ func (e *ClientExecutor) Preserve(path string) error {
 }
 
 func (e *ClientExecutor) EnsureContainerPath(path string) error {
-    return e.createOrReplaceContainerPathWithOwner(path, 0, 0)
+    return e.createOrReplaceContainerPathWithOwner(path, 0, 0, nil)
 }
 
-func (e *ClientExecutor) createOrReplaceContainerPathWithOwner(path string, uid, gid int) error {
+func (e *ClientExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
+    uid, gid := 0, 0
+
+    u, g, err := e.getUser(user)
+    if err == nil {
+        uid = u
+        gid = g
+    }
+
+    return e.createOrReplaceContainerPathWithOwner(path, uid, gid, mode)
+}
+
+func (e *ClientExecutor) createOrReplaceContainerPathWithOwner(path string, uid, gid int, mode *os.FileMode) error {
+    if mode == nil {
+        m := os.FileMode(0755)
+        mode = &m
+    }
     createPath := func(dest string) error {
         var writerErr error
         if !strings.HasSuffix(dest, "/") {
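createOrReplaceContainerPathWithOwner now takes an optional mode and defaults it to 0755; the directory is materialized by uploading a one-entry tar stream whose header carries the uid, gid, and mode. A minimal sketch of building such a stream in memory, showing the general mechanism rather than the exact helper.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"os"
)

// dirTarball builds a tar stream containing a single directory entry with
// the requested owner and mode, defaulting the mode to 0755 like the helper.
func dirTarball(dest string, uid, gid int, mode *os.FileMode) ([]byte, error) {
	if mode == nil {
		m := os.FileMode(0755)
		mode = &m
	}
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{
		Name:     dest + "/",
		Typeflag: tar.TypeDir,
		Mode:     int64(*mode),
		Uid:      uid,
		Gid:      gid,
	}); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	data, err := dirTarball("opt/app", 1001, 1001, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("tarball with one directory entry: %d bytes\n", len(data))
}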
@@ -704,7 +720,7 @@ func (e *ClientExecutor) createOrReplaceContainerPathWithOwner(path string, uid,
         writerErr = tarball.WriteHeader(&tar.Header{
             Name:     dest,
             Typeflag: tar.TypeDir,
-            Mode:     0755,
+            Mode:     int64(*mode),
             Uid:      uid,
             Gid:      gid,
         })
@@ -848,21 +864,22 @@ func (e *ClientExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) er
     return e.CopyContainer(e.Container, excludes, copies...)
 }
 
-// CopyContainer copies the provided content into a destination container.
-func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []string, copies ...imagebuilder.Copy) error {
-    chownUid, chownGid := -1, -1
-    chown := func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) {
-        if chownUid != -1 {
-            h.Uid = chownUid
-        }
-        if chownGid != -1 {
-            h.Gid = chownGid
-        }
-        if (h.Uid > 0x1fffff || h.Gid > 0x1fffff) && h.Format == tar.FormatUSTAR {
-            h.Format = tar.FormatPAX
-        }
-        return nil, false, false, nil
-    }
+func (e *ClientExecutor) findMissingParents(container *docker.Container, dest string) (parents []string, err error) {
+    destParent := filepath.Clean(dest)
+    for filepath.Dir(destParent) != destParent {
+        exists, err := isContainerPathDirectory(e.Client, container.ID, destParent)
+        if err != nil {
+            return nil, err
+        }
+        if !exists {
+            parents = append(parents, destParent)
+        }
+        destParent = filepath.Dir(destParent)
+    }
+    return parents, nil
+}
+
+func (e *ClientExecutor) getUser(userspec string) (int, int, error) {
     readFile := func(path string) ([]byte, error) {
         var buffer, contents bytes.Buffer
         if err := e.Client.DownloadFromContainer(e.Container.ID, docker.DownloadFromContainerOptions{
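findMissingParents climbs from the destination toward the root and records every component the container does not have yet, so those directories can be created with the requested ownership before the main upload. A standalone sketch of the walk with the existence check stubbed out:

package main

import (
	"fmt"
	"path/filepath"
)

// missingParents mirrors the walk in findMissingParents: climb from dest
// toward "/" and collect every component that exists() reports as absent.
func missingParents(dest string, exists func(string) bool) []string {
	var parents []string
	parent := filepath.Clean(dest)
	for filepath.Dir(parent) != parent {
		if !exists(parent) {
			parents = append(parents, parent)
		}
		parent = filepath.Dir(parent)
	}
	return parents
}

func main() {
	// pretend only "/" and "/opt" already exist in the container
	existing := map[string]bool{"/": true, "/opt": true}
	got := missingParents("/opt/app/bin", func(p string) bool { return existing[p] })
	fmt.Println(got) // [/opt/app/bin /opt/app]
}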
@@ -922,89 +939,96 @@ func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []s
         }
         return *value, nil
     }
-    findMissingParents := func(dest string) (parents []string, err error) {
-        destParent := filepath.Clean(dest)
-        for filepath.Dir(destParent) != destParent {
-            exists, err := isContainerPathDirectory(e.Client, container.ID, destParent)
-            if err != nil {
-                return nil, err
-            }
-            if !exists {
-                parents = append(parents, destParent)
-            }
-            destParent = filepath.Dir(destParent)
-        }
-        return parents, nil
-    }
+
+    spec := strings.SplitN(userspec, ":", 2)
+    if len(spec) == 2 {
+        parsedUid, err := strconv.ParseUint(spec[0], 10, 32)
+        if err != nil {
+            // maybe it's a user name? look up the UID
+            passwdFile, err := readFile("/etc/passwd")
+            if err != nil {
+                return -1, -1, err
+            }
+            uid, err := parse(passwdFile, 0, spec[0], 7, 2)
+            if err != nil {
+                return -1, -1, fmt.Errorf("error reading UID value from passwd file for --chown=%s: %v", spec[0], err)
+            }
+            parsedUid, err = strconv.ParseUint(uid, 10, 32)
+            if err != nil {
+                return -1, -1, fmt.Errorf("error parsing UID value %q from passwd file for --chown=%s", uid, userspec)
+            }
+        }
+        parsedGid, err := strconv.ParseUint(spec[1], 10, 32)
+        if err != nil {
+            // maybe it's a group name? look up the GID
+            groupFile, err := readFile("/etc/group")
+            if err != nil {
+                return -1, -1, err
+            }
+            gid, err := parse(groupFile, 0, spec[1], 4, 2)
+            if err != nil {
+                return -1, -1, err
+            }
+            parsedGid, err = strconv.ParseUint(gid, 10, 32)
+            if err != nil {
+                return -1, -1, fmt.Errorf("error parsing GID value %q from group file for --chown=%s", gid, userspec)
+            }
+        }
+        return int(parsedUid), int(parsedGid), nil
+    }
+
+    var parsedUid, parsedGid uint64
+    if id, err := strconv.ParseUint(spec[0], 10, 32); err == nil {
+        // it's an ID. use it as both the UID and the GID
+        parsedUid = id
+        parsedGid = id
+    } else {
+        // it's a user name, we'll need to look up their UID and primary GID
+        passwdFile, err := readFile("/etc/passwd")
+        if err != nil {
+            return -1, -1, err
+        }
+        // read the UID and primary GID
+        uid, err := parse(passwdFile, 0, spec[0], 7, 2)
+        if err != nil {
+            return -1, -1, fmt.Errorf("error reading UID value from /etc/passwd for --chown=%s", userspec)
+        }
+        gid, err := parse(passwdFile, 0, spec[0], 7, 3)
+        if err != nil {
+            return -1, -1, fmt.Errorf("error reading GID value from /etc/passwd for --chown=%s", userspec)
+        }
+        if parsedUid, err = strconv.ParseUint(uid, 10, 32); err != nil {
+            return -1, -1, fmt.Errorf("error parsing UID value %q from /etc/passwd for --chown=%s", uid, userspec)
+        }
+        if parsedGid, err = strconv.ParseUint(gid, 10, 32); err != nil {
+            return -1, -1, fmt.Errorf("error parsing GID value %q from /etc/passwd for --chown=%s", gid, userspec)
+        }
+    }
+    return int(parsedUid), int(parsedGid), nil
+}
+
+// CopyContainer copies the provided content into a destination container.
+func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []string, copies ...imagebuilder.Copy) error {
+    chownUid, chownGid := -1, -1
+    chown := func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) {
+        if chownUid != -1 {
+            h.Uid = chownUid
+        }
+        if chownGid != -1 {
+            h.Gid = chownGid
+        }
+        if (h.Uid > 0x1fffff || h.Gid > 0x1fffff) && h.Format == tar.FormatUSTAR {
+            h.Format = tar.FormatPAX
+        }
+        return nil, false, false, nil
+    }
     for _, c := range copies {
         chownUid, chownGid = -1, -1
         if c.Chown != "" {
-            spec := strings.SplitN(c.Chown, ":", 2)
-            if len(spec) == 2 {
-                parsedUid, err := strconv.ParseUint(spec[0], 10, 32)
-                if err != nil {
-                    // maybe it's a user name? look up the UID
-                    passwdFile, err := readFile("/etc/passwd")
-                    if err != nil {
-                        return err
-                    }
-                    uid, err := parse(passwdFile, 0, spec[0], 7, 2)
-                    if err != nil {
-                        return fmt.Errorf("error reading UID value from passwd file for --chown=%s: %v", spec[0], err)
-                    }
-                    parsedUid, err = strconv.ParseUint(uid, 10, 32)
-                    if err != nil {
-                        return fmt.Errorf("error parsing UID value %q from passwd file for --chown=%s", uid, c.Chown)
-                    }
-                }
-                parsedGid, err := strconv.ParseUint(spec[1], 10, 32)
-                if err != nil {
-                    // maybe it's a group name? look up the GID
-                    groupFile, err := readFile("/etc/group")
-                    if err != nil {
-                        return err
-                    }
-                    gid, err := parse(groupFile, 0, spec[1], 4, 2)
-                    if err != nil {
-                        return err
-                    }
-                    parsedGid, err = strconv.ParseUint(gid, 10, 32)
-                    if err != nil {
-                        return fmt.Errorf("error parsing GID value %q from group file for --chown=%s", gid, c.Chown)
-                    }
-                }
-                chownUid = int(parsedUid)
-                chownGid = int(parsedGid)
-            } else {
-                var parsedUid, parsedGid uint64
-                if id, err := strconv.ParseUint(spec[0], 10, 32); err == nil {
-                    // it's an ID. use it as both the UID and the GID
-                    parsedUid = id
-                    parsedGid = id
-                } else {
-                    // it's a user name, we'll need to look up their UID and primary GID
-                    passwdFile, err := readFile("/etc/passwd")
-                    if err != nil {
-                        return err
-                    }
-                    // read the UID and primary GID
-                    uid, err := parse(passwdFile, 0, spec[0], 7, 2)
-                    if err != nil {
-                        return fmt.Errorf("error reading UID value from /etc/passwd for --chown=%s", c.Chown)
-                    }
-                    gid, err := parse(passwdFile, 0, spec[0], 7, 3)
-                    if err != nil {
-                        return fmt.Errorf("error reading GID value from /etc/passwd for --chown=%s", c.Chown)
-                    }
-                    if parsedUid, err = strconv.ParseUint(uid, 10, 32); err != nil {
-                        return fmt.Errorf("error parsing UID value %q from /etc/passwd for --chown=%s", uid, c.Chown)
-                    }
-                    if parsedGid, err = strconv.ParseUint(gid, 10, 32); err != nil {
-                        return fmt.Errorf("error parsing GID value %q from /etc/passwd for --chown=%s", gid, c.Chown)
-                    }
-                }
-                chownUid = int(parsedUid)
-                chownGid = int(parsedGid)
-            }
+            var err error
+            chownUid, chownGid, err = e.getUser(c.Chown)
+            if err != nil {
+                return err
+            }
         }
         // TODO: reuse source
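getUser resolves a chown-style "user:group" (or bare user) spec by trying numeric IDs first and only then consulting the container's /etc/passwd and /etc/group. A rough illustration of that numeric-first fallback for the UID half, with a hypothetical lookup callback standing in for reading /etc/passwd out of the container:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// resolveUID tries the spec as a number first and falls back to a
// passwd-style lookup, mirroring the order getUser uses.
func resolveUID(spec string, lookup func(name string) (string, error)) (int, error) {
	user := strings.SplitN(spec, ":", 2)[0]
	if id, err := strconv.ParseUint(user, 10, 32); err == nil {
		return int(id), nil
	}
	uid, err := lookup(user) // e.g. the third column of /etc/passwd
	if err != nil {
		return -1, fmt.Errorf("error reading UID for --chown=%s: %w", spec, err)
	}
	id, err := strconv.ParseUint(uid, 10, 32)
	if err != nil {
		return -1, fmt.Errorf("error parsing UID value %q for --chown=%s", uid, spec)
	}
	return int(id), nil
}

func main() {
	lookup := func(name string) (string, error) {
		// stand-in for parsing /etc/passwd fetched from the container
		if name == "builder" {
			return "1001", nil
		}
		return "", fmt.Errorf("no such user %q", name)
	}
	for _, spec := range []string{"0:0", "builder:builder", "builder"} {
		uid, err := resolveUID(spec, lookup)
		fmt.Println(spec, "->", uid, err)
	}
}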
@@ -1012,12 +1036,20 @@ func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []s
         if src == "" {
             src = "*"
         }
+        assumeDstIsDirectory := len(c.Src) > 1
+    repeatThisSrc:
         klog.V(4).Infof("Archiving %s download=%t fromFS=%t from=%s", src, c.Download, c.FromFS, c.From)
         var r io.Reader
         var closer io.Closer
         var err error
         if len(c.From) > 0 {
-            r, closer, err = e.archiveFromContainer(c.From, src, c.Dest)
+            if !assumeDstIsDirectory {
+                var err error
+                if assumeDstIsDirectory, err = e.isContainerGlobMultiple(e.Client, c.From, src); err != nil {
+                    return err
+                }
+            }
+            r, closer, err = e.archiveFromContainer(c.From, src, c.Dest, assumeDstIsDirectory)
         } else {
             r, closer, err = e.Archive(c.FromFS, src, c.Dest, c.Download, excludes)
         }
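The destination is assumed to be a directory whenever a COPY lists more than one source; for a single --from source the executor instead probes the source container to see whether the glob expands to several items. A tiny sketch of that decision order with the probe stubbed out; the function names here are invented for the example.

package main

import "fmt"

// shouldTreatDstAsDir mirrors the decision in CopyContainer: more than one
// source always implies a directory destination; for a single --from source
// a probe checks whether the glob matches more than one item in the source
// container.
func shouldTreatDstAsDir(srcs []string, from string, globMatchesMultiple func(from, glob string) (bool, error)) (bool, error) {
	if len(srcs) > 1 {
		return true, nil
	}
	if from != "" {
		return globMatchesMultiple(from, srcs[0])
	}
	return false, nil
}

func main() {
	probe := func(from, glob string) (bool, error) {
		// pretend the glob expanded to several files in the "builder" stage
		return from == "builder" && glob == "/out/*", nil
	}
	ok, _ := shouldTreatDstAsDir([]string{"/out/*"}, "builder", probe)
	fmt.Println("assume destination is a directory:", ok)
}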
@@ -1027,7 +1059,11 @@ func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []s
         asOwner := ""
         if c.Chown != "" {
             asOwner = fmt.Sprintf(" as %d:%d", chownUid, chownGid)
-            missingParents, err := findMissingParents(c.Dest)
+            // the daemon would implicitly create missing
+            // directories with the wrong ownership, so
+            // check for any that don't exist and create
+            // them ourselves
+            missingParents, err := e.findMissingParents(container, c.Dest)
             if err != nil {
                 return err
             }
@@ -1035,7 +1071,7 @@ func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []s
             sort.Strings(missingParents)
             klog.V(5).Infof("Uploading directories %v to %s%s", missingParents, container.ID, asOwner)
             for _, missingParent := range missingParents {
-                if err := e.createOrReplaceContainerPathWithOwner(missingParent, chownUid, chownGid); err != nil {
+                if err := e.createOrReplaceContainerPathWithOwner(missingParent, chownUid, chownGid, nil); err != nil {
                     return err
                 }
             }
@@ -1050,6 +1086,13 @@ func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []s
         if klog.V(6) {
             logArchiveOutput(r, "Archive file for %s")
         }
+        // add a workaround allow us to notice if a
+        // dstNeedsToBeDirectoryError was returned while
+        // attempting to read the data we're uploading,
+        // indicating that we thought the content would be just
+        // one item, but it actually isn't
+        reader := &readErrorWrapper{Reader: r}
+        r = reader
         err = e.Client.UploadToContainer(container.ID, docker.UploadToContainerOptions{
             InputStream: r,
             Path:        "/",
@@ -1058,6 +1101,13 @@ func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []s
             klog.Errorf("Error while closing stream container copy stream %s: %v", container.ID, err)
         }
         if err != nil {
+            if errors.Is(reader.err, dstNeedsToBeDirectoryError) && !assumeDstIsDirectory {
+                assumeDstIsDirectory = true
+                goto repeatThisSrc
+            }
+            if apiErr, ok := err.(*docker.Error); ok && apiErr.Status == 404 {
+                klog.V(4).Infof("path %s did not exist in container %s: %v", src, container.ID, err)
+            }
             return err
         }
     }
@@ -1065,6 +1115,16 @@ func (e *ClientExecutor) CopyContainer(container *docker.Container, excludes []s
     return nil
 }
 
+type readErrorWrapper struct {
+    io.Reader
+    err error
+}
+
+func (r *readErrorWrapper) Read(p []byte) (n int, err error) {
+    n, r.err = r.Reader.Read(p)
+    return n, r.err
+}
+
 type closers []func() error
 
 func (c closers) Close() error {
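readErrorWrapper remembers the last error produced while the upload consumed the archive, which is how the caller can tell a dstNeedsToBeDirectoryError (retry with a directory destination) apart from an ordinary upload failure. A compact, self-contained illustration of that pattern with a stand-in error and reader:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

var errNeedsDir = errors.New("destination needs to be a directory")

// errReader fails after yielding its payload, standing in for an archive
// stream whose mapper gave up partway through.
type errReader struct {
	r    io.Reader
	err  error
	done bool
}

func (e *errReader) Read(p []byte) (int, error) {
	n, err := e.r.Read(p)
	if err == io.EOF && !e.done {
		e.done = true
		return n, e.err
	}
	return n, err
}

// readErrorWrapper mirrors the type added in this change: it remembers the
// last error returned by the underlying reader.
type readErrorWrapper struct {
	io.Reader
	err error
}

func (r *readErrorWrapper) Read(p []byte) (n int, err error) {
	n, r.err = r.Reader.Read(p)
	return n, r.err
}

func main() {
	src := &errReader{r: strings.NewReader("tar bytes"), err: errNeedsDir}
	w := &readErrorWrapper{Reader: src}
	_, _ = io.Copy(io.Discard, w) // stands in for the upload consuming the stream
	if errors.Is(w.err, errNeedsDir) {
		fmt.Println("retry the copy with the destination treated as a directory")
	}
}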
@@ -1077,7 +1137,7 @@ func (c closers) Close() error {
     return lastErr
 }
 
-func (e *ClientExecutor) archiveFromContainer(from string, src, dst string) (io.Reader, io.Closer, error) {
+func (e *ClientExecutor) archiveFromContainer(from string, src, dst string, multipleSources bool) (io.Reader, io.Closer, error) {
     var containerID string
     if other, ok := e.Named[from]; ok {
         if other.Container == nil {
@@ -1114,7 +1174,7 @@ func (e *ClientExecutor) archiveFromContainer(from string, src, dst string) (io.
         })
         pw.CloseWithError(err)
     }
-    ar, archiveRoot, err := archiveFromContainer(pr, src, dst, nil, check, fetch)
+    ar, archiveRoot, err := archiveFromContainer(pr, src, dst, nil, check, fetch, multipleSources)
     if err != nil {
         pr.Close()
         pw.Close()
@@ -1132,6 +1192,51 @@ func (e *ClientExecutor) archiveFromContainer(from string, src, dst string) (io.
     return &readCloser{Reader: ar, Closer: closer}, pr, nil
 }
 
+func (e *ClientExecutor) isContainerGlobMultiple(client *docker.Client, from, glob string) (bool, error) {
+    reader, closer, err := e.archiveFromContainer(from, glob, "/ignored", true)
+    if err != nil {
+        return false, nil
+    }
+
+    defer closer.Close()
+    tr := tar.NewReader(reader)
+
+    h, err := tr.Next()
+    if err != nil {
+        if err == io.EOF {
+            err = nil
+        } else {
+            if apiErr, ok := err.(*docker.Error); ok && apiErr.Status == 404 {
+                klog.V(4).Infof("path %s did not exist in container %s: %v", glob, e.Container.ID, err)
+                err = nil
+            }
+        }
+        return false, err
+    }
+
+    klog.V(4).Infof("Retrieved first header from %s using glob %s: %#v", from, glob, h)
+
+    h, err = tr.Next()
+    if err != nil {
+        if err == io.EOF {
+            err = nil
+        }
+        return false, err
+    }
+
+    klog.V(4).Infof("Retrieved second header from %s using glob %s: %#v", from, glob, h)
+
+    // take the remainder of the input and discard it
+    go func() {
+        n, err := io.Copy(ioutil.Discard, reader)
+        if n > 0 || err != nil {
+            klog.V(6).Infof("Discarded %d bytes from end of from glob check, and got error: %v", n, err)
+        }
+    }()
+
+    return true, nil
+}
+
 func (e *ClientExecutor) Archive(fromFS bool, src, dst string, allowDownload bool, excludes []string) (io.Reader, io.Closer, error) {
     var check DirectoryCheck
     if e.Container != nil {
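isContainerGlobMultiple asks the source container for the glob and reads at most two tar headers: no second header means zero or one match, while a second header means the glob covers multiple items. A standalone sketch of the two-header check against an in-memory tar stream:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// hasMultipleEntries reads at most two headers from a tar stream, the same
// trick isContainerGlobMultiple uses to decide whether a glob matched more
// than one item.
func hasMultipleEntries(r io.Reader) (bool, error) {
	tr := tar.NewReader(r)
	if _, err := tr.Next(); err != nil {
		if err == io.EOF {
			return false, nil
		}
		return false, err
	}
	if _, err := tr.Next(); err != nil {
		if err == io.EOF {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	for _, name := range []string{"bin/app", "bin/helper"} {
		_ = tw.WriteHeader(&tar.Header{Name: name, Typeflag: tar.TypeReg, Mode: 0644, Size: 0})
	}
	_ = tw.Close()

	multiple, err := hasMultipleEntries(&buf)
	fmt.Println(multiple, err) // true <nil>
}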
@@ -12,7 +12,7 @@
 #
 
 %global golang_version 1.8.1
-%{!?version: %global version 1.2.2-dev}
+%{!?version: %global version 1.2.3}
 %{!?release: %global release 1}
 %global package_name imagebuilder
 %global product_name Container Image Builder
@@ -400,7 +400,7 @@ github.com/opencontainers/go-digest
 # github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 => github.com/opencontainers/image-spec v1.0.2-0.20211123152302-43a7dee1ec31
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.0
+# github.com/opencontainers/runc v1.1.1
 github.com/opencontainers/runc/libcontainer/apparmor
 github.com/opencontainers/runc/libcontainer/devices
 github.com/opencontainers/runc/libcontainer/user
@@ -420,7 +420,7 @@ github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
 github.com/opencontainers/selinux/pkg/pwalk
 github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/openshift/imagebuilder v1.2.2
+# github.com/openshift/imagebuilder v1.2.3
 github.com/openshift/imagebuilder
 github.com/openshift/imagebuilder/dockerclient
 github.com/openshift/imagebuilder/dockerfile/command