package imagebuildah

import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "sort"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/containers/buildah"
    "github.com/containers/buildah/define"
    "github.com/containers/buildah/pkg/parse"
    "github.com/containers/buildah/pkg/sshagent"
    "github.com/containers/buildah/util"
    "github.com/containers/common/libimage"
    nettypes "github.com/containers/common/libnetwork/types"
    "github.com/containers/common/pkg/config"
    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/manifest"
    is "github.com/containers/image/v5/storage"
    storageTransport "github.com/containers/image/v5/storage"
    "github.com/containers/image/v5/transports"
    "github.com/containers/image/v5/transports/alltransports"
    "github.com/containers/image/v5/types"
    encconfig "github.com/containers/ocicrypt/config"
    "github.com/containers/storage"
    "github.com/containers/storage/pkg/archive"
    digest "github.com/opencontainers/go-digest"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/openshift/imagebuilder"
    "github.com/openshift/imagebuilder/dockerfile/parser"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "golang.org/x/sync/semaphore"
)

// builtinAllowedBuildArgs is a list of built-in allowed build args.  Normally we
// complain if we're given values for arguments which have no corresponding ARG
// instruction in the Dockerfile, since that's usually an indication of a user
// error, but for these values we make exceptions and ignore them.
var builtinAllowedBuildArgs = map[string]bool{
    "HTTP_PROXY":  true,
    "http_proxy":  true,
    "HTTPS_PROXY": true,
    "https_proxy": true,
    "FTP_PROXY":   true,
    "ftp_proxy":   true,
    "NO_PROXY":    true,
    "no_proxy":    true,
}
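
// For example (illustrative): a build that is given both a proxy variable and
// an undeclared argument, e.g. --build-arg HTTPS_PROXY=http://proxy:3128 and
// --build-arg FOO=bar, will warn that FOO was not consumed if the
// Containerfile contains no "ARG FOO" instruction, but the proxy variables
// listed above are always accepted silently.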

// Executor is a buildah-based implementation of the imagebuilder.Executor
// interface.  It coordinates the entire build by using one or more
// StageExecutors to handle each stage of the build.
type Executor struct {
    containerSuffix string
    logger *logrus.Logger
    stages map[string]*StageExecutor
    store storage.Store
    contextDir string
    pullPolicy define.PullPolicy
    registry string
    ignoreUnrecognizedInstructions bool
    quiet bool
    runtime string
    runtimeArgs []string
    transientMounts []Mount
    compression archive.Compression
    output string
    outputFormat string
    additionalTags []string
    log func(format string, args ...interface{}) // can be nil
    in io.Reader
    out io.Writer
    err io.Writer
    signaturePolicyPath string
    systemContext *types.SystemContext
    reportWriter io.Writer
    isolation define.Isolation
    namespaceOptions []define.NamespaceOption
    configureNetwork define.NetworkConfigurationPolicy
    cniPluginPath string
    cniConfigDir string
    // networkInterface is the libnetwork network interface used to set up CNI or netavark networks.
    networkInterface nettypes.ContainerNetwork
    idmappingOptions *define.IDMappingOptions
    commonBuildOptions *define.CommonBuildOptions
    defaultMountsFilePath string
    iidfile string
    squash bool
    labels []string
    annotations []string
    layers bool
    useCache bool
    removeIntermediateCtrs bool
    forceRmIntermediateCtrs bool
    imageMap map[string]string // Used to map images that we create to handle the AS construct.
    containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers.
    baseMap map[string]bool // Holds the names of every base image, as given.
    rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction.
    blobDirectory string
    excludes []string
    ignoreFile string
    unusedArgs map[string]struct{}
    capabilities []string
    devices define.ContainerDevices
    signBy string
    architecture string
    timestamp *time.Time
    os string
    maxPullPushRetries int
    retryPullPushDelay time.Duration
    ociDecryptConfig *encconfig.DecryptConfig
    lastError error
    terminatedStage map[string]error
    stagesLock sync.Mutex
    stagesSemaphore *semaphore.Weighted
    logRusage bool
    rusageLogFile io.Writer
    imageInfoLock sync.Mutex
    imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs
    fromOverride string
    manifest string
    secrets map[string]define.Secret
    sshsources map[string]*sshagent.Source
    logPrefix string
    unsetEnvs []string
    processLabel string // Shares processLabel of first stage container with containers of other stages in same build
    mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build
}
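
// imageTypeAndHistoryAndDiffIDs caches the values returned by
// getImageTypeAndHistoryAndDiffIDs for a given image ID, so that repeated
// lookups of the same image don't have to reread its manifest and config.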
type imageTypeAndHistoryAndDiffIDs struct {
    manifestType string
    history []v1.History
    diffIDs []digest.Digest
    err error
}

// newExecutor creates a new instance of the imagebuilder.Executor interface.
func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node) (*Executor, error) {
    defaultContainerConfig, err := config.Default()
    if err != nil {
        return nil, errors.Wrapf(err, "failed to get container config")
    }

    excludes := options.Excludes
    if len(excludes) == 0 {
        excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile)
        if err != nil {
            return nil, err
        }
    }
    capabilities, err := defaultContainerConfig.Capabilities("", options.AddCapabilities, options.DropCapabilities)
    if err != nil {
        return nil, err
    }

    devices := define.ContainerDevices{}
    for _, device := range append(defaultContainerConfig.Containers.Devices, options.Devices...) {
        dev, err := parse.DeviceFromPath(device)
        if err != nil {
            return nil, err
        }
        devices = append(dev, devices...)
    }

    transientMounts := []Mount{}
    for _, volume := range append(defaultContainerConfig.Containers.Volumes, options.TransientMounts...) {
        mount, err := parse.Volume(volume)
        if err != nil {
            return nil, err
        }
        transientMounts = append([]Mount{mount}, transientMounts...)
    }

    secrets, err := parse.Secrets(options.CommonBuildOpts.Secrets)
    if err != nil {
        return nil, err
    }
    sshsources, err := parse.SSH(options.CommonBuildOpts.SSHSources)
    if err != nil {
        return nil, err
    }

    writer := options.ReportWriter
    if options.Quiet {
        writer = ioutil.Discard
    }

    var rusageLogFile io.Writer

    if options.LogRusage && !options.Quiet {
        if options.RusageLogFile == "" {
            rusageLogFile = options.Out
        } else {
            rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
            if err != nil {
                return nil, err
            }
        }
    }

    exec := Executor{
        containerSuffix: options.ContainerSuffix,
        logger: logger,
        stages: make(map[string]*StageExecutor),
        store: store,
        contextDir: options.ContextDirectory,
        excludes: excludes,
        ignoreFile: options.IgnoreFile,
        pullPolicy: options.PullPolicy,
        registry: options.Registry,
        ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
        quiet: options.Quiet,
        runtime: options.Runtime,
        runtimeArgs: options.RuntimeArgs,
        transientMounts: transientMounts,
        compression: options.Compression,
        output: options.Output,
        outputFormat: options.OutputFormat,
        additionalTags: options.AdditionalTags,
        signaturePolicyPath: options.SignaturePolicyPath,
        systemContext: options.SystemContext,
        log: options.Log,
        in: options.In,
        out: options.Out,
        err: options.Err,
        reportWriter: writer,
        isolation: options.Isolation,
        namespaceOptions: options.NamespaceOptions,
        configureNetwork: options.ConfigureNetwork,
        cniPluginPath: options.CNIPluginPath,
        cniConfigDir: options.CNIConfigDir,
        networkInterface: options.NetworkInterface,
        idmappingOptions: options.IDMappingOptions,
        commonBuildOptions: options.CommonBuildOpts,
        defaultMountsFilePath: options.DefaultMountsFilePath,
        iidfile: options.IIDFile,
        squash: options.Squash,
        labels: append([]string{}, options.Labels...),
        annotations: append([]string{}, options.Annotations...),
        layers: options.Layers,
        useCache: !options.NoCache,
        removeIntermediateCtrs: options.RemoveIntermediateCtrs,
        forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
        imageMap: make(map[string]string),
        containerMap: make(map[string]*buildah.Builder),
        baseMap: make(map[string]bool),
        rootfsMap: make(map[string]bool),
        blobDirectory: options.BlobDirectory,
        unusedArgs: make(map[string]struct{}),
        capabilities: capabilities,
        devices: devices,
        signBy: options.SignBy,
        architecture: options.Architecture,
        timestamp: options.Timestamp,
        os: options.OS,
        maxPullPushRetries: options.MaxPullPushRetries,
        retryPullPushDelay: options.PullPushRetryDelay,
        ociDecryptConfig: options.OciDecryptConfig,
        terminatedStage: make(map[string]error),
        stagesSemaphore: options.JobSemaphore,
        logRusage: options.LogRusage,
        rusageLogFile: rusageLogFile,
        imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs),
        fromOverride: options.From,
        manifest: options.Manifest,
        secrets: secrets,
        sshsources: sshsources,
        logPrefix: logPrefix,
        unsetEnvs: options.UnsetEnvs,
    }
    if exec.err == nil {
        exec.err = os.Stderr
    }
    if exec.out == nil {
        exec.out = os.Stdout
    }

    for arg := range options.Args {
        if _, isBuiltIn := builtinAllowedBuildArgs[arg]; !isBuiltIn {
            exec.unusedArgs[arg] = struct{}{}
        }
    }
    for _, line := range mainNode.Children {
        node := line
        for node != nil { // tokens on this line, though we only care about the first
            switch strings.ToUpper(node.Value) { // first token - instruction
            case "ARG":
                arg := node.Next
                if arg != nil {
                    // We have to be careful here - it's either an argument
                    // and value, or just an argument, since they can be
                    // separated by either "=" or whitespace.
                    list := strings.SplitN(arg.Value, "=", 2)
                    delete(exec.unusedArgs, list[0])
                }
            }
            break
        }
    }
    return &exec, nil
}

// startStage creates a new stage executor that will be referenced whenever a
// COPY or ADD statement uses a --from=NAME flag.
func (b *Executor) startStage(ctx context.Context, stage *imagebuilder.Stage, stages imagebuilder.Stages, output string) *StageExecutor {
    stageExec := &StageExecutor{
        ctx: ctx,
        executor: b,
        log: b.log,
        index: stage.Position,
        stages: stages,
        name: stage.Name,
        volumeCache: make(map[string]string),
        volumeCacheInfo: make(map[string]os.FileInfo),
        output: output,
        stage: stage,
    }
    b.stages[stage.Name] = stageExec
    if idx := strconv.Itoa(stage.Position); idx != stage.Name {
        b.stages[idx] = stageExec
    }
    return stageExec
}

// resolveNameToImageRef creates a types.ImageReference for the output name in local storage.
func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, error) {
    if imageRef, err := alltransports.ParseImageName(output); err == nil {
        return imageRef, nil
    }
    runtime, err := libimage.RuntimeFromStore(b.store, &libimage.RuntimeOptions{SystemContext: b.systemContext})
    if err != nil {
        return nil, err
    }
    resolved, err := runtime.ResolveName(output)
    if err != nil {
        return nil, err
    }
    imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved)
    if err == nil {
        return imageRef, nil
    }

    return imageRef, err
}

// waitForStage waits for an entry to be added to terminatedStage indicating
// that the specified stage has finished.  If there is no stage defined by that
// name, then it will return (false, nil).  If there is a stage defined by that
// name, it will return true along with any error it encounters.
func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebuilder.Stages) (bool, error) {
    found := false
    for _, otherStage := range stages {
        if otherStage.Name == name || strconv.Itoa(otherStage.Position) == name {
            found = true
            break
        }
    }
    if !found {
        return false, nil
    }
    for {
        if b.lastError != nil {
            return true, b.lastError
        }

        b.stagesLock.Lock()
        terminationError, terminated := b.terminatedStage[name]
        b.stagesLock.Unlock()

        if terminationError != nil {
            return false, terminationError
        }
        if terminated {
            return true, nil
        }

        b.stagesSemaphore.Release(1)
        time.Sleep(time.Millisecond * 10)
        if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
            return true, errors.Wrapf(err, "error reacquiring job semaphore")
        }
    }
}

// getImageTypeAndHistoryAndDiffIDs returns the manifest type, history, and diff IDs list of imageID.
func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID string) (string, []v1.History, []digest.Digest, error) {
    b.imageInfoLock.Lock()
    imageInfo, ok := b.imageInfoCache[imageID]
    b.imageInfoLock.Unlock()
    if ok {
        return imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err
    }
    imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
    if err != nil {
        return "", nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID)
    }
    ref, err := imageRef.NewImage(ctx, nil)
    if err != nil {
        return "", nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
    }
    defer ref.Close()
    oci, err := ref.OCIConfig(ctx)
    if err != nil {
        return "", nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
    }
    manifestBytes, manifestFormat, err := ref.Manifest(ctx)
    if err != nil {
        return "", nil, nil, errors.Wrapf(err, "error getting manifest of image %q", imageID)
    }
    if manifestFormat == "" && len(manifestBytes) > 0 {
        manifestFormat = manifest.GuessMIMEType(manifestBytes)
    }
    b.imageInfoLock.Lock()
    b.imageInfoCache[imageID] = imageTypeAndHistoryAndDiffIDs{
        manifestType: manifestFormat,
        history: oci.History,
        diffIDs: oci.RootFS.DiffIDs,
        err: nil,
    }
    b.imageInfoLock.Unlock()
    return manifestFormat, oci.History, oci.RootFS.DiffIDs, nil
}
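
// buildStage builds a single stage of the build: it creates the stage's
// StageExecutor, registers the stage's build container for cleanup when the
// layer and removal settings call for it, and runs the stage, returning the
// image ID and canonical reference that the stage produced.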
func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) {
    stage := stages[stageIndex]
    ib := stage.Builder
    node := stage.Node
    base, err := ib.From(node)

    // If this is the last stage, then the image that we produce at
    // its end should be given the desired output name.
    output := ""
    if stageIndex == len(stages)-1 {
        output = b.output
    }

    if err != nil {
        logrus.Debugf("buildStage(node.Children=%#v)", node.Children)
        return "", nil, err
    }

    b.stagesLock.Lock()
    stageExecutor := b.startStage(ctx, &stage, stages, output)
    if stageExecutor.log == nil {
        stepCounter := 0
        stageExecutor.log = func(format string, args ...interface{}) {
            prefix := b.logPrefix
            if len(stages) > 1 {
                prefix += fmt.Sprintf("[%d/%d] ", stageIndex+1, len(stages))
            }
            if !strings.HasPrefix(format, "COMMIT") {
                stepCounter++
                prefix += fmt.Sprintf("STEP %d", stepCounter)
                if stepCounter <= len(stage.Node.Children)+1 {
                    prefix += fmt.Sprintf("/%d", len(stage.Node.Children)+1)
                }
                prefix += ": "
            }
            suffix := "\n"
            fmt.Fprintf(stageExecutor.executor.out, prefix+format+suffix, args...)
        }
    }
    b.stagesLock.Unlock()

    // If this is a single-layer build, or if it's a multi-layered
    // build and b.forceRmIntermediateCtrs is set, make sure we
    // remove the intermediate/build containers, regardless of
    // whether or not the stage's build fails.
    // Skip cleanup if the stage has no instructions.
    if b.forceRmIntermediateCtrs || !b.layers && len(stage.Node.Children) > 0 {
        b.stagesLock.Lock()
        cleanupStages[stage.Position] = stageExecutor
        b.stagesLock.Unlock()
    }

    // Build this stage.
    if imageID, ref, err = stageExecutor.Execute(ctx, base); err != nil {
        return "", nil, err
    }

    // The stage succeeded, so remove its build container if we're
    // told to delete successful intermediate/build containers for
    // multi-layered builds.
    // Skip cleanup if the stage has no instructions.
    if b.removeIntermediateCtrs && len(stage.Node.Children) > 0 {
        b.stagesLock.Lock()
        cleanupStages[stage.Position] = stageExecutor
        b.stagesLock.Unlock()
    }

    return imageID, ref, nil
}

// Build takes care of the details of running Prepare/Execute/Commit/Delete
// over each of the one or more parsed Dockerfiles and stages.
func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) {
    if len(stages) == 0 {
        return "", nil, errors.New("error building: no stages to build")
    }
    var cleanupImages []string
    cleanupStages := make(map[int]*StageExecutor)

    stdout := b.out
    if b.quiet {
        b.out = ioutil.Discard
    }

    cleanup := func() error {
        var lastErr error
        // Clean up any containers associated with the final container
        // built by a stage, for stages that succeeded, since we no
        // longer need their filesystem contents.

        b.stagesLock.Lock()
        for _, stage := range cleanupStages {
            if err := stage.Delete(); err != nil {
                logrus.Debugf("Failed to cleanup stage containers: %v", err)
                lastErr = err
            }
        }
        cleanupStages = nil
        b.stagesLock.Unlock()

        // Clean up any builders that we used to get data from images.
        for _, builder := range b.containerMap {
            if err := builder.Delete(); err != nil {
                logrus.Debugf("Failed to cleanup image containers: %v", err)
                lastErr = err
            }
        }
        b.containerMap = nil
        // Clean up any intermediate containers associated with stages,
        // since we're not keeping them for debugging.
        if b.removeIntermediateCtrs {
            if err := b.deleteSuccessfulIntermediateCtrs(); err != nil {
                logrus.Debugf("Failed to cleanup intermediate containers: %v", err)
                lastErr = err
            }
        }
        // Remove images from stages except the last one, since we're
        // not going to use them as a starting point for any new
        // stages.
        for i := range cleanupImages {
            removeID := cleanupImages[len(cleanupImages)-i-1]
            if removeID == imageID {
                continue
            }
            if _, err := b.store.DeleteImage(removeID, true); err != nil {
                logrus.Debugf("failed to remove intermediate image %q: %v", removeID, err)
                if b.forceRmIntermediateCtrs || errors.Cause(err) != storage.ErrImageUsedByContainer {
                    lastErr = err
                }
            }
        }
        cleanupImages = nil

        if b.rusageLogFile != nil && b.rusageLogFile != b.out {
            // we deliberately ignore the error here, as this
            // function can be called multiple times
            if closer, ok := b.rusageLogFile.(interface{ Close() error }); ok {
                closer.Close()
            }
        }
        return lastErr
    }

    defer func() {
        if cleanupErr := cleanup(); cleanupErr != nil {
            if err == nil {
                err = cleanupErr
            } else {
                err = errors.Wrap(err, cleanupErr.Error())
            }
        }
    }()

    // Build maps of every named base image and every referenced stage root
    // filesystem.  Individual stages can use them to determine whether or
    // not they can skip certain steps near the end of their stages.
    for stageIndex, stage := range stages {
        node := stage.Node // first line
        for node != nil { // each line
            for _, child := range node.Children { // tokens on this line, though we only care about the first
                switch strings.ToUpper(child.Value) { // first token - instruction
                case "FROM":
                    if child.Next != nil { // second token on this line
                        // If we have a fromOverride, replace the value of
                        // the image name for the first FROM in the Containerfile.
                        if b.fromOverride != "" {
                            child.Next.Value = b.fromOverride
                            b.fromOverride = ""
                        }
                        base := child.Next.Value
                        if base != "scratch" {
                            // TODO: this didn't undergo variable and arg
                            // expansion, so if the AS clause in another
                            // FROM instruction uses argument values,
                            // we might not record the right value here.
                            b.baseMap[base] = true
                            logrus.Debugf("base for stage %d: %q", stageIndex, base)
                        }
                    }
                case "ADD", "COPY":
                    for _, flag := range child.Flags { // flags for this instruction
                        if strings.HasPrefix(flag, "--from=") {
                            // TODO: this didn't undergo variable and
                            // arg expansion, so if the previous stage
                            // was named using argument values, we might
                            // not record the right value here.
                            rootfs := strings.TrimPrefix(flag, "--from=")
                            b.rootfsMap[rootfs] = true
                            logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs)
                        }
                    }
                }
            }
            node = node.Next // next line
        }
    }
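
    // Result holds the outcome of building one stage: the stage's index, the
    // image ID and canonical reference it produced on success, or the error
    // it encountered on failure.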
    type Result struct {
        Index int
        ImageID string
        Ref reference.Canonical
        Error error
    }
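
    // Each stage is built in its own goroutine.  Results come back over a
    // buffered channel so that a stage which finishes after the caller has
    // stopped reading cannot block, and the jobs semaphore bounds how many
    // stages run concurrently.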
    ch := make(chan Result, len(stages))

    if b.stagesSemaphore == nil {
        b.stagesSemaphore = semaphore.NewWeighted(int64(len(stages)))
    }

    var wg sync.WaitGroup
    wg.Add(len(stages))

    go func() {
        cancel := false
        for stageIndex := range stages {
            index := stageIndex
            // Acquire the semaphore before creating the goroutine so we are sure they
            // run in the specified order.
            if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
                cancel = true
                b.lastError = err
                ch <- Result{
                    Index: index,
                    Error: err,
                }
                wg.Done()
                continue
            }
            b.stagesLock.Lock()
            cleanupStages := cleanupStages
            b.stagesLock.Unlock()
            go func() {
                defer b.stagesSemaphore.Release(1)
                defer wg.Done()
                if cancel || cleanupStages == nil {
                    var err error
                    if stages[index].Name != strconv.Itoa(index) {
                        err = errors.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
                    } else {
                        err = errors.Errorf("not building stage %d: build canceled", index)
                    }
                    ch <- Result{
                        Index: index,
                        Error: err,
                    }
                    return
                }
                stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
                if stageErr != nil {
                    cancel = true
                    ch <- Result{
                        Index: index,
                        Error: stageErr,
                    }
                    return
                }

                ch <- Result{
                    Index: index,
                    ImageID: stageID,
                    Ref: stageRef,
                    Error: nil,
                }
            }()
        }
    }()
    go func() {
        wg.Wait()
        close(ch)
    }()

    for r := range ch {
        stage := stages[r.Index]

        b.stagesLock.Lock()
        b.terminatedStage[stage.Name] = r.Error
        b.terminatedStage[strconv.Itoa(stage.Position)] = r.Error

        if r.Error != nil {
            b.stagesLock.Unlock()
            b.lastError = r.Error
            return "", nil, r.Error
        }

        // If this is an intermediate stage, make a note of the ID, so
        // that we can look it up later.
        if r.Index < len(stages)-1 && r.ImageID != "" {
            b.imageMap[stage.Name] = r.ImageID
            // We're not populating the cache with intermediate
            // images, so add this one to the list of images that
            // we'll remove later.
            if !b.layers {
                cleanupImages = append(cleanupImages, r.ImageID)
            }
        }
        if r.Index == len(stages)-1 {
            imageID = r.ImageID
            ref = r.Ref
        }
        b.stagesLock.Unlock()
    }

    if len(b.unusedArgs) > 0 {
        unusedList := make([]string, 0, len(b.unusedArgs))
        for k := range b.unusedArgs {
            unusedList = append(unusedList, k)
        }
        sort.Strings(unusedList)
        fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList)
    }

    // Add additional tags and print image names recorded in storage.
    if dest, err := b.resolveNameToImageRef(b.output); err == nil {
        switch dest.Transport().Name() {
        case is.Transport.Name():
            img, err := is.Transport.GetStoreImage(b.store, dest)
            if err != nil {
                return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
            }
            if len(b.additionalTags) > 0 {
                if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
                    return imageID, ref, errors.Wrapf(err, "error setting image names to %v", append(img.Names, b.additionalTags...))
                }
                logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
            }
            // Report back to the caller the tags applied, if any.
            img, err = is.Transport.GetStoreImage(b.store, dest)
            if err != nil {
                return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
            }
            for _, name := range img.Names {
                fmt.Fprintf(b.out, "Successfully tagged %s\n", name)
            }

        default:
            if len(b.additionalTags) > 0 {
                b.logger.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name())
            }
        }
    }

    if err := cleanup(); err != nil {
        return "", nil, err
    }
    logrus.Debugf("printing final image id %q", imageID)
    if b.iidfile != "" {
        if err = ioutil.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
            return imageID, ref, errors.Wrapf(err, "failed to write image ID to file %q", b.iidfile)
        }
    } else {
        if _, err := stdout.Write([]byte(imageID + "\n")); err != nil {
            return imageID, ref, errors.Wrapf(err, "failed to write image ID to stdout")
        }
    }
    return imageID, ref, nil
}

// deleteSuccessfulIntermediateCtrs goes through the container IDs in each
// stage's containerIDs list and deletes the containers associated with those
// IDs.
func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
    var lastErr error
    for _, s := range b.stages {
        for _, ctr := range s.containerIDs {
            if err := b.store.DeleteContainer(ctr); err != nil {
                b.logger.Errorf("error deleting build container %q: %v\n", ctr, err)
                lastErr = err
            }
        }
        // The stages map includes some stages under multiple keys, so
        // clearing their lists after we process a given stage is
        // necessary to avoid triggering errors that would occur if we
        // tried to delete a given stage's containers multiple times.
        s.containerIDs = nil
    }
    return lastErr
}
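
// A rough usage sketch (illustrative; the real wiring is done by this
// package's build entry points): given a parsed Containerfile's main node and
// its stages from the imagebuilder parser, a caller constructs an executor
// and runs the build:
//
//	exec, err := newExecutor(logger, "", store, options, mainNode)
//	if err != nil {
//		return "", nil, err
//	}
//	imageID, ref, err := exec.Build(ctx, stages)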