package imagebuildah

import (
    "context"
    "errors"
    "fmt"
    "io"
    "os"
    "slices"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/containers/buildah"
    "github.com/containers/buildah/define"
    "github.com/containers/buildah/internal"
    internalUtil "github.com/containers/buildah/internal/util"
    "github.com/containers/buildah/pkg/parse"
    "github.com/containers/buildah/pkg/sshagent"
    "github.com/containers/buildah/util"
    encconfig "github.com/containers/ocicrypt/config"
    digest "github.com/opencontainers/go-digest"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/openshift/imagebuilder"
    "github.com/openshift/imagebuilder/dockerfile/parser"
    "github.com/sirupsen/logrus"
    "go.podman.io/common/libimage"
    nettypes "go.podman.io/common/libnetwork/types"
    "go.podman.io/common/pkg/config"
    "go.podman.io/image/v5/docker/reference"
    "go.podman.io/image/v5/manifest"
    storageTransport "go.podman.io/image/v5/storage"
    "go.podman.io/image/v5/transports"
    "go.podman.io/image/v5/transports/alltransports"
    "go.podman.io/image/v5/types"
    "go.podman.io/storage"
    "go.podman.io/storage/pkg/archive"
    "golang.org/x/sync/semaphore"
)

// builtinAllowedBuildArgs is a list of built-in allowed build args. Normally we
// complain if we're given values for arguments which have no corresponding ARG
// instruction in the Dockerfile, since that's usually an indication of a user
// error, but for these values we make exceptions and ignore them.
var builtinAllowedBuildArgs = map[string]struct{}{
    "HTTP_PROXY":                 {},
    "http_proxy":                 {},
    "HTTPS_PROXY":                {},
    "https_proxy":                {},
    "FTP_PROXY":                  {},
    "ftp_proxy":                  {},
    "NO_PROXY":                   {},
    "no_proxy":                   {},
    "TARGETARCH":                 {},
    "TARGETOS":                   {},
    "TARGETPLATFORM":             {},
    "TARGETVARIANT":              {},
    internal.SourceDateEpochName: {},
}

// Executor is a buildah-based implementation of the imagebuilder.Executor
// interface. It coordinates the entire build by using one or more
// StageExecutors to handle each stage of the build.
type Executor struct {
    cacheFrom []reference.Named
    cacheTo []reference.Named
    cacheTTL time.Duration
    containerSuffix string
    logger *logrus.Logger
    stages map[string]*StageExecutor
    store storage.Store
    contextDir string
    pullPolicy define.PullPolicy
    registry string
    ignoreUnrecognizedInstructions bool
    quiet bool
    runtime string
    runtimeArgs []string
    transientMounts []Mount
    compression archive.Compression
    output string
    outputFormat string
    additionalTags []string
    log func(format string, args ...any) // can be nil
    in io.Reader
    inheritLabels types.OptionalBool
    inheritAnnotations types.OptionalBool
    out io.Writer
    err io.Writer
    signaturePolicyPath string
    skipUnusedStages types.OptionalBool
    systemContext *types.SystemContext
    reportWriter io.Writer
    isolation define.Isolation
    namespaceOptions []define.NamespaceOption
    configureNetwork define.NetworkConfigurationPolicy
    cniPluginPath string
    cniConfigDir string
    // networkInterface is the libnetwork network interface used to set up CNI or netavark networks.
    networkInterface nettypes.ContainerNetwork
    idmappingOptions *define.IDMappingOptions
    commonBuildOptions *define.CommonBuildOptions
    defaultMountsFilePath string
    iidfile string
    squash bool
    labels []string
    layerLabels []string
    annotations []string
    layers bool
    noHostname bool
    noHosts bool
    useCache bool
    removeIntermediateCtrs bool
    forceRmIntermediateCtrs bool
    imageMap map[string]string // Used to map images that we create to handle the AS construct.
    containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers.
    baseMap map[string]struct{} // Holds the names of every base image, as given.
    rootfsMap map[string]struct{} // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction.
    blobDirectory string
    excludes []string
    groupAdd []string
    ignoreFile string
    args map[string]string
    globalArgs map[string]string
    unusedArgs map[string]struct{}
    capabilities []string
    devices define.ContainerDevices
    deviceSpecs []string
    signBy string
    architecture string
    timestamp *time.Time
    os string
    maxPullPushRetries int
    retryPullPushDelay time.Duration
    cachePullSourceLookupReferenceFunc libimage.LookupReferenceFunc
    cachePullDestinationLookupReferenceFunc func(srcRef types.ImageReference) libimage.LookupReferenceFunc
    cachePushSourceLookupReferenceFunc func(dest types.ImageReference) libimage.LookupReferenceFunc
    cachePushDestinationLookupReferenceFunc libimage.LookupReferenceFunc
    ociDecryptConfig *encconfig.DecryptConfig
    lastError error
    terminatedStage map[string]error
    stagesLock sync.Mutex
    stagesSemaphore *semaphore.Weighted
    logRusage bool
    rusageLogFile io.Writer
    imageInfoLock sync.Mutex
    imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs
    fromOverride string
    additionalBuildContexts map[string]*define.AdditionalBuildContext
    manifest string
    secrets map[string]define.Secret
    sshsources map[string]*sshagent.Source
    logPrefix string
    unsetEnvs []string
    unsetLabels []string
    unsetAnnotations []string
    processLabel string // Shares the processLabel of the first-stage container with the containers of other stages in the same build.
    mountLabel string // Shares the mountLabel of the first-stage container with the containers of other stages in the same build.
    buildOutputs []string // Specifies instructions for any custom build output.
    osVersion string
    osFeatures []string
    envs []string
    confidentialWorkload define.ConfidentialWorkloadOptions
    sbomScanOptions []define.SBOMScanOptions
    cdiConfigDir string
    compatSetParent types.OptionalBool
    compatVolumes types.OptionalBool
    compatScratchConfig types.OptionalBool
    compatLayerOmissions types.OptionalBool
    noPivotRoot bool
    sourceDateEpoch *time.Time
    rewriteTimestamp bool
    createdAnnotation types.OptionalBool
}
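// imageTypeAndHistoryAndDiffIDs caches the results of a
// getImageTypeAndHistoryAndDiffIDs() call for one image: its manifest type,
// history, layer diff IDs, architecture, and OS, along with any error that
// was recorded for it.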
type imageTypeAndHistoryAndDiffIDs struct {
    manifestType string
    history []v1.History
    diffIDs []digest.Digest
    err error
    architecture string
    os string
}

// newExecutor creates a new instance of the imagebuilder.Executor interface.
func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node, containerFiles []string) (*Executor, error) {
    defaultContainerConfig, err := config.Default()
    if err != nil {
        return nil, fmt.Errorf("failed to get container config: %w", err)
    }

    excludes := options.Excludes
    if len(excludes) == 0 {
        excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile, containerFiles)
        if err != nil {
            return nil, err
        }
    }
    capabilities, err := defaultContainerConfig.Capabilities("", options.AddCapabilities, options.DropCapabilities)
    if err != nil {
        return nil, err
    }

    var transientMounts []Mount

    for _, volume := range append(defaultContainerConfig.Volumes(), options.TransientMounts...) {
        mount, err := parse.Volume(volume)
        if err != nil {
            return nil, err
        }
        transientMounts = append([]Mount{mount}, transientMounts...)
    }

    secrets, err := parse.Secrets(options.CommonBuildOpts.Secrets)
    if err != nil {
        return nil, err
    }
    sshsources, err := parse.SSH(options.CommonBuildOpts.SSHSources)
    if err != nil {
        return nil, err
    }

    writer := options.ReportWriter
    if options.Quiet {
        writer = io.Discard
    }

    var rusageLogFile io.Writer

    if options.LogRusage && !options.Quiet {
        if options.RusageLogFile == "" {
            rusageLogFile = options.Out
        } else {
            rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644)
            if err != nil {
                return nil, fmt.Errorf("creating file to store rusage logs: %w", err)
            }
        }
    }

    buildOutputs := slices.Clone(options.BuildOutputs)
    if options.BuildOutput != "" { //nolint:staticcheck
        buildOutputs = append(buildOutputs, options.BuildOutput) //nolint:staticcheck
    }

    exec := Executor{
        args: options.Args,
        cacheFrom: options.CacheFrom,
        cacheTo: options.CacheTo,
        cacheTTL: options.CacheTTL,
        containerSuffix: options.ContainerSuffix,
        logger: logger,
        stages: make(map[string]*StageExecutor),
        store: store,
        contextDir: options.ContextDirectory,
        excludes: excludes,
        groupAdd: options.GroupAdd,
        ignoreFile: options.IgnoreFile,
        pullPolicy: options.PullPolicy,
        registry: options.Registry,
        ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
        quiet: options.Quiet,
        runtime: options.Runtime,
        runtimeArgs: options.RuntimeArgs,
        transientMounts: transientMounts,
        compression: options.Compression,
        output: options.Output,
        outputFormat: options.OutputFormat,
        additionalTags: options.AdditionalTags,
        signaturePolicyPath: options.SignaturePolicyPath,
        skipUnusedStages: options.SkipUnusedStages,
        systemContext: options.SystemContext,
        log: options.Log,
        in: options.In,
        out: options.Out,
        err: options.Err,
        reportWriter: writer,
        isolation: options.Isolation,
        inheritLabels: options.InheritLabels,
        inheritAnnotations: options.InheritAnnotations,
        namespaceOptions: options.NamespaceOptions,
        configureNetwork: options.ConfigureNetwork,
        cniPluginPath: options.CNIPluginPath,
        cniConfigDir: options.CNIConfigDir,
        networkInterface: options.NetworkInterface,
        idmappingOptions: options.IDMappingOptions,
        commonBuildOptions: options.CommonBuildOpts,
        defaultMountsFilePath: options.DefaultMountsFilePath,
        iidfile: options.IIDFile,
        squash: options.Squash,
        labels: slices.Clone(options.Labels),
        layerLabels: slices.Clone(options.LayerLabels),
        annotations: slices.Clone(options.Annotations),
        layers: options.Layers,
        noHostname: options.CommonBuildOpts.NoHostname,
        noHosts: options.CommonBuildOpts.NoHosts,
        useCache: !options.NoCache,
        removeIntermediateCtrs: options.RemoveIntermediateCtrs,
        forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
        imageMap: make(map[string]string),
        containerMap: make(map[string]*buildah.Builder),
        baseMap: make(map[string]struct{}),
        rootfsMap: make(map[string]struct{}),
        blobDirectory: options.BlobDirectory,
        unusedArgs: make(map[string]struct{}),
        capabilities: capabilities,
        deviceSpecs: options.Devices,
        signBy: options.SignBy,
        architecture: options.Architecture,
        timestamp: options.Timestamp,
        os: options.OS,
        maxPullPushRetries: options.MaxPullPushRetries,
        retryPullPushDelay: options.PullPushRetryDelay,
        cachePullSourceLookupReferenceFunc: options.CachePullSourceLookupReferenceFunc,
        cachePullDestinationLookupReferenceFunc: options.CachePullDestinationLookupReferenceFunc,
        cachePushSourceLookupReferenceFunc: options.CachePushSourceLookupReferenceFunc,
        cachePushDestinationLookupReferenceFunc: options.CachePushDestinationLookupReferenceFunc,
        ociDecryptConfig: options.OciDecryptConfig,
        terminatedStage: make(map[string]error),
        stagesSemaphore: options.JobSemaphore,
        logRusage: options.LogRusage,
        rusageLogFile: rusageLogFile,
        imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs),
        fromOverride: options.From,
        additionalBuildContexts: options.AdditionalBuildContexts,
        manifest: options.Manifest,
        secrets: secrets,
        sshsources: sshsources,
        logPrefix: logPrefix,
        unsetEnvs: slices.Clone(options.UnsetEnvs),
        unsetLabels: slices.Clone(options.UnsetLabels),
        unsetAnnotations: slices.Clone(options.UnsetAnnotations),
        buildOutputs: buildOutputs,
        osVersion: options.OSVersion,
        osFeatures: slices.Clone(options.OSFeatures),
        envs: slices.Clone(options.Envs),
        confidentialWorkload: options.ConfidentialWorkload,
        sbomScanOptions: options.SBOMScanOptions,
        cdiConfigDir: options.CDIConfigDir,
        compatSetParent: options.CompatSetParent,
        compatVolumes: options.CompatVolumes,
        compatScratchConfig: options.CompatScratchConfig,
        compatLayerOmissions: options.CompatLayerOmissions,
        noPivotRoot: options.NoPivotRoot,
        sourceDateEpoch: options.SourceDateEpoch,
        rewriteTimestamp: options.RewriteTimestamp,
        createdAnnotation: options.CreatedAnnotation,
    }
    // Sort unsetAnnotations because we will later write these values to the
    // image's history, and we want that order to always be consistent.
    slices.Sort(exec.unsetAnnotations)

    if exec.err == nil {
        exec.err = os.Stderr
    }
    if exec.out == nil {
        exec.out = os.Stdout
    }

    for arg := range options.Args {
        if _, isBuiltIn := builtinAllowedBuildArgs[arg]; !isBuiltIn {
            exec.unusedArgs[arg] = struct{}{}
        }
    }
    // Use this flag to collect all args declared before the first stage and
    // treat them as global args, which are accessible to all stages.
    foundFirstStage := false
    globalArgs := make(map[string]string)
    for _, line := range mainNode.Children {
        node := line
        for node != nil { // tokens on this line, though we only care about the first
            switch strings.ToUpper(node.Value) { // first token - instruction
            case "ARG":
                arg := node.Next
                if arg != nil {
                    // We have to be careful here - it's either an argument
                    // and value, or just an argument, since they can be
                    // separated by either "=" or whitespace.
                    argName, argValue, hasValue := strings.Cut(arg.Value, "=")
                    if !foundFirstStage {
                        if hasValue {
                            globalArgs[argName] = argValue
                        }
                    }
                    delete(exec.unusedArgs, argName)
                }
            case "FROM":
                foundFirstStage = true
            }
            break
        }
    }
    exec.globalArgs = globalArgs
    return &exec, nil
}

// startStage creates a new stage executor that will be referenced whenever a
// COPY or ADD statement uses a --from=NAME flag.
func (b *Executor) startStage(ctx context.Context, stage *imagebuilder.Stage, stages imagebuilder.Stages, output string) *StageExecutor {
    // create a copy of systemContext for each stage executor.
    systemContext := *b.systemContext
    stageExec := &StageExecutor{
        ctx: ctx,
        executor: b,
        systemContext: &systemContext,
        log: b.log,
        index: stage.Position,
        stages: stages,
        name: stage.Name,
        volumeCache: make(map[string]string),
        volumeCacheInfo: make(map[string]os.FileInfo),
        output: output,
        stage: stage,
    }
    b.stages[stage.Name] = stageExec
    if idx := strconv.Itoa(stage.Position); idx != stage.Name {
        b.stages[idx] = stageExec
    }
    return stageExec
}

// resolveNameToImageRef creates a types.ImageReference for the output name in local storage.
func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, error) {
    if imageRef, err := alltransports.ParseImageName(output); err == nil {
        return imageRef, nil
    }
    resolved, err := libimage.NormalizeName(output)
    if err != nil {
        return nil, err
    }
    imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved.String())
    if err == nil {
        return imageRef, nil
    }

    return imageRef, err
}

// waitForStage waits for an entry to be added to terminatedStage indicating
// that the specified stage has finished. If there is no stage defined by that
// name, then it will return (false, nil). If there is a stage defined by that
// name, it will return true along with any error it encounters.
func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebuilder.Stages) (bool, error) {
    found := false
    for _, otherStage := range stages {
        if otherStage.Name == name || strconv.Itoa(otherStage.Position) == name {
            found = true
            break
        }
    }
    if !found {
        return false, nil
    }
    for {
        if b.lastError != nil {
            return true, b.lastError
        }

        b.stagesLock.Lock()
        terminationError, terminated := b.terminatedStage[name]
        b.stagesLock.Unlock()

        if terminationError != nil {
            return false, terminationError
        }
        if terminated {
            return true, nil
        }

        b.stagesSemaphore.Release(1)
        time.Sleep(time.Millisecond * 10)
        if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
            return true, fmt.Errorf("reacquiring job semaphore: %w", err)
        }
    }
}

// getImageTypeAndHistoryAndDiffIDs returns the os, architecture, manifest type, history, and diff IDs list of imageID.
func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID string) (string, string, string, []v1.History, []digest.Digest, error) {
    b.imageInfoLock.Lock()
    imageInfo, ok := b.imageInfoCache[imageID]
    b.imageInfoLock.Unlock()
    if ok {
        return imageInfo.os, imageInfo.architecture, imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err
    }
    imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, "@"+imageID)
    if err != nil {
        return "", "", "", nil, nil, fmt.Errorf("getting image reference %q: %w", imageID, err)
    }
    ref, err := imageRef.NewImage(ctx, nil)
    if err != nil {
        return "", "", "", nil, nil, fmt.Errorf("creating new image from reference to image %q: %w", imageID, err)
    }
    defer ref.Close()
    oci, err := ref.OCIConfig(ctx)
    if err != nil {
        return "", "", "", nil, nil, fmt.Errorf("getting possibly-converted OCI config of image %q: %w", imageID, err)
    }
    manifestBytes, manifestFormat, err := ref.Manifest(ctx)
    if err != nil {
        return "", "", "", nil, nil, fmt.Errorf("getting manifest of image %q: %w", imageID, err)
    }
    if manifestFormat == "" && len(manifestBytes) > 0 {
        manifestFormat = manifest.GuessMIMEType(manifestBytes)
    }
    b.imageInfoLock.Lock()
    b.imageInfoCache[imageID] = imageTypeAndHistoryAndDiffIDs{
        manifestType: manifestFormat,
        history: oci.History,
        diffIDs: oci.RootFS.DiffIDs,
        err: nil,
        architecture: oci.Architecture,
        os: oci.OS,
    }
    b.imageInfoLock.Unlock()
    return oci.OS, oci.Architecture, manifestFormat, oci.History, oci.RootFS.DiffIDs, nil
}
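// buildStage builds the stage at the given index in stages, creating a
// StageExecutor for it and registering that executor in cleanupStages when its
// build container should be removed afterwards. It returns the stage's image ID
// and canonical reference, and whether the stage consisted of only a base image.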
func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, onlyBaseImage bool, err error) {
    stage := stages[stageIndex]
    ib := stage.Builder
    node := stage.Node
    base, err := ib.From(node)
    if err != nil {
        logrus.Debugf("buildStage(node.Children=%#v)", node.Children)
        return "", nil, false, err
    }

    // If this is the last stage, then the image that we produce at
    // its end should be given the desired output name.
    output := ""
    if stageIndex == len(stages)-1 {
        output = b.output
        // Check if any labels were passed in via the API, and add a final line
        // to the Dockerfile that would provide the same result.
        // Reason: Docker adds label modification as a last step which can be
        // processed like regular steps, and if no modification is done to
        // layers, it's easier to reuse cached layers.
        if len(b.labels) > 0 {
            var labelLine string
            labels := slices.Clone(b.labels)
            for _, labelSpec := range labels {
                key, value, _ := strings.Cut(labelSpec, "=")
                // check only for an empty key since docker allows empty values
                if key != "" {
                    labelLine += fmt.Sprintf(" %q=%q", key, value)
                }
            }
            if len(labelLine) > 0 {
                additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("LABEL" + labelLine + "\n"))
                if err != nil {
                    return "", nil, false, fmt.Errorf("while adding additional LABEL step: %w", err)
                }
                stage.Node.Children = append(stage.Node.Children, additionalNode.Children...)
            }
        }
    }

    // If this stage is starting out with environment variables that were
    // passed in via our API, we should include them in the history, since
    // they affect RUN instructions in this stage.
    if len(b.envs) > 0 {
        var envLine string
        for _, envSpec := range b.envs {
            key, value, hasValue := strings.Cut(envSpec, "=")
            if hasValue {
                envLine += fmt.Sprintf(" %q=%q", key, value)
            } else {
                return "", nil, false, fmt.Errorf("BUG: unresolved environment variable: %q", key)
            }
        }
        if len(envLine) > 0 {
            additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("ENV" + envLine + "\n"))
            if err != nil {
                return "", nil, false, fmt.Errorf("while adding additional ENV step: %w", err)
            }
            // make this the first instruction in the stage after its FROM instruction
            stage.Node.Children = append(additionalNode.Children, stage.Node.Children...)
        }
    }

    b.stagesLock.Lock()
    stageExecutor := b.startStage(ctx, &stage, stages, output)
    if stageExecutor.log == nil {
        stepCounter := 0
        stageExecutor.log = func(format string, args ...any) {
            prefix := b.logPrefix
            if len(stages) > 1 {
                prefix += fmt.Sprintf("[%d/%d] ", stageIndex+1, len(stages))
            }
            if !strings.HasPrefix(format, "COMMIT") {
                stepCounter++
                prefix += fmt.Sprintf("STEP %d", stepCounter)
                if stepCounter <= len(stage.Node.Children)+1 {
                    prefix += fmt.Sprintf("/%d", len(stage.Node.Children)+1)
                }
                prefix += ": "
            }
            suffix := "\n"
            fmt.Fprintf(stageExecutor.executor.out, prefix+format+suffix, args...)
        }
    }
    b.stagesLock.Unlock()

    // If this is a single-layer build, or if it's a multi-layered
    // build and b.forceRmIntermediateCtrs is set, make sure we
    // remove the intermediate/build containers, regardless of
    // whether or not the stage's build fails.
    if b.forceRmIntermediateCtrs || !b.layers {
        b.stagesLock.Lock()
        cleanupStages[stage.Position] = stageExecutor
        b.stagesLock.Unlock()
    }

    // Build this stage.
    if imageID, ref, onlyBaseImage, err = stageExecutor.Execute(ctx, base); err != nil {
        return "", nil, onlyBaseImage, err
    }

    // The stage succeeded, so remove its build container if we're
    // told to delete successful intermediate/build containers for
    // multi-layered builds.
    // Skip cleanup if the stage has no instructions.
    if b.removeIntermediateCtrs && len(stage.Node.Children) > 0 {
        b.stagesLock.Lock()
        cleanupStages[stage.Position] = stageExecutor
        b.stagesLock.Unlock()
    }

    return imageID, ref, onlyBaseImage, nil
}
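// stageDependencyInfo records a stage's name and position, the names of the
// stages it depends on, and whether the target stage needs it, directly or
// indirectly.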
type stageDependencyInfo struct {
    Name string
    Position int
    Needs []string
    NeededByTarget bool
}

// markDependencyStagesForTarget marks NeededByTarget as true for the given
// stage and, recursively, for all of the stages that it depends on.
func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo, stage string) {
    if stageDependencyInfo, ok := dependencyMap[stage]; ok {
        if !stageDependencyInfo.NeededByTarget {
            stageDependencyInfo.NeededByTarget = true
            for _, need := range stageDependencyInfo.Needs {
                markDependencyStagesForTarget(dependencyMap, need)
            }
        }
    }
}
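// warnOnUnsetBuildArgs warns about ARG instructions that were declared without
// a default value and were not given one on the command line, skipping args
// that belong to stages the target does not need, built-in args, and global args.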
func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMap map[string]*stageDependencyInfo, args map[string]string) {
    argFound := make(map[string]struct{})
    for _, stage := range stages {
        node := stage.Node // first line
        for node != nil { // each line
            for _, child := range node.Children {
                switch strings.ToUpper(child.Value) {
                case "ARG":
                    argName := child.Next.Value
                    if strings.Contains(argName, "=") {
                        res := strings.Split(argName, "=")
                        if res[1] != "" {
                            argFound[res[0]] = struct{}{}
                        }
                    }
                    argHasValue := true
                    if !strings.Contains(argName, "=") {
                        argHasValue = internalUtil.SetHas(argFound, argName)
                    }
                    if _, ok := args[argName]; !argHasValue && !ok {
                        shouldWarn := true
                        if stageDependencyInfo, ok := dependencyMap[stage.Name]; ok {
                            if !stageDependencyInfo.NeededByTarget && b.skipUnusedStages != types.OptionalBoolFalse {
                                shouldWarn = false
                            }
                        }
                        if _, isBuiltIn := builtinAllowedBuildArgs[argName]; isBuiltIn {
                            shouldWarn = false
                        }
                        if _, isGlobalArg := b.globalArgs[argName]; isGlobalArg {
                            shouldWarn = false
                        }
                        if shouldWarn {
                            b.logger.Warnf("missing %q build argument. Try adding %q to the command line", argName, fmt.Sprintf("--build-arg %s=<VALUE>", argName))
                        }
                    }
                default:
                    continue
                }
            }
            node = node.Next
        }
    }
}

// Build takes care of the details of running Prepare/Execute/Commit/Delete
// over each of the one or more parsed Dockerfiles and stages.
func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) {
    if len(stages) == 0 {
        return "", nil, errors.New("building: no stages to build")
    }
    var cleanupImages []string
    cleanupStages := make(map[int]*StageExecutor)

    stdout := b.out
    if b.quiet {
        b.out = io.Discard
    }

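    // cleanup removes the containers and intermediate images that this build
    // created and no longer needs; note that it may be called more than once.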
    cleanup := func() error {
        var lastErr error
        // Clean up any containers associated with the final container
        // built by a stage, for stages that succeeded, since we no
        // longer need their filesystem contents.

        b.stagesLock.Lock()
        for _, stage := range cleanupStages {
            if err := stage.Delete(); err != nil {
                logrus.Debugf("Failed to cleanup stage containers: %v", err)
                lastErr = err
            }
        }
        cleanupStages = nil
        b.stagesLock.Unlock()

        // Clean up any builders that we used to get data from images.
        for _, builder := range b.containerMap {
            if err := builder.Delete(); err != nil {
                logrus.Debugf("Failed to cleanup image containers: %v", err)
                lastErr = err
            }
        }
        b.containerMap = nil
        // Clean up any intermediate containers associated with stages,
        // since we're not keeping them for debugging.
        if b.removeIntermediateCtrs {
            if err := b.deleteSuccessfulIntermediateCtrs(); err != nil {
                logrus.Debugf("Failed to cleanup intermediate containers: %v", err)
                lastErr = err
            }
        }
        // Remove images from stages except the last one, since we're
        // not going to use them as a starting point for any new
        // stages.
        for i := range cleanupImages {
            removeID := cleanupImages[len(cleanupImages)-i-1]
            if removeID == imageID {
                continue
            }
            if _, err := b.store.DeleteImage(removeID, true); err != nil {
                logrus.Debugf("failed to remove intermediate image %q: %v", removeID, err)
                if b.forceRmIntermediateCtrs || !errors.Is(err, storage.ErrImageUsedByContainer) {
                    lastErr = err
                }
            }
        }
        cleanupImages = nil

        if b.rusageLogFile != nil && b.rusageLogFile != b.out {
            // we deliberately ignore the error here, as this
            // function can be called multiple times
            if closer, ok := b.rusageLogFile.(interface{ Close() error }); ok {
                closer.Close()
            }
        }
        return lastErr
    }

    defer func() {
        if cleanupErr := cleanup(); cleanupErr != nil {
            if err == nil {
                err = cleanupErr
            } else {
                err = fmt.Errorf("%v: %w", cleanupErr.Error(), err)
            }
        }
    }()

    // dependencyMap contains a stageDependencyInfo for each stage; it is used
    // later to mark whether or not a particular stage is needed by the target.
    dependencyMap := make(map[string]*stageDependencyInfo)
    // Build maps of every named base image and every referenced stage root
    // filesystem. Individual stages can use them to determine whether or
    // not they can skip certain steps near the end of their stages.
    for stageIndex, stage := range stages {
        dependencyMap[stage.Name] = &stageDependencyInfo{Name: stage.Name, Position: stage.Position}
        node := stage.Node // first line
        for node != nil { // each line
            for _, child := range node.Children { // tokens on this line, though we only care about the first
                switch strings.ToUpper(child.Value) { // first token - instruction
                case "FROM":
                    if child.Next != nil { // second token on this line
                        // If we have a fromOverride, replace the value of the
                        // image name for the first FROM in the Containerfile.
                        if b.fromOverride != "" {
                            child.Next.Value = b.fromOverride
                            b.fromOverride = ""
                        }
                        base := child.Next.Value
                        if base != "" && base != buildah.BaseImageFakeName {
                            if replaceBuildContext, ok := b.additionalBuildContexts[child.Next.Value]; ok {
                                if replaceBuildContext.IsImage {
                                    child.Next.Value = replaceBuildContext.Value
                                    base = child.Next.Value
                                }
                            }
                            builtinArgs := argsMapToSlice(stage.Builder.BuiltinArgDefaults)
                            headingArgs := argsMapToSlice(stage.Builder.HeadingArgs)
                            userArgs := argsMapToSlice(stage.Builder.Args)
                            // Append the heading args so that, if --build-arg key=value was
                            // not specified but a default value was set in the Containerfile
                            // via `ARG key=value`, the default value can be used.
                            userArgs = append(builtinArgs, append(userArgs, headingArgs...)...)
                            baseWithArg, err := imagebuilder.ProcessWord(base, userArgs)
                            if err != nil {
                                return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err)
                            }
                            b.baseMap[baseWithArg] = struct{}{}
                            logrus.Debugf("base for stage %d: %q resolves to %q", stageIndex, base, baseWithArg)
                            // If the selected base is not an additional build
                            // context and is a valid stage, add it to the
                            // current stage's dependency tree.
                            if _, ok := b.additionalBuildContexts[baseWithArg]; !ok {
                                if _, ok := dependencyMap[baseWithArg]; ok {
                                    // update current stage's dependency info
                                    currentStageInfo := dependencyMap[stage.Name]
                                    currentStageInfo.Needs = append(currentStageInfo.Needs, baseWithArg)
                                }
                            }
                        }
                    }
                case "ADD", "COPY":
                    for _, flag := range child.Flags { // flags for this instruction
                        if strings.HasPrefix(flag, "--from=") {
                            // TODO: this didn't undergo variable and
                            // arg expansion, so if the previous stage
                            // was named using argument values, we might
                            // not record the right value here.
                            rootfs := strings.TrimPrefix(flag, "--from=")
                            b.rootfsMap[rootfs] = struct{}{}
                            logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs)
                            // Populate the dependency tree and check whether
                            // this ADD or COPY needs any other stage.
                            stageName := rootfs
                            builtinArgs := argsMapToSlice(stage.Builder.BuiltinArgDefaults)
                            headingArgs := argsMapToSlice(stage.Builder.HeadingArgs)
                            userArgs := argsMapToSlice(stage.Builder.Args)
                            // Append the heading args so that, if --build-arg key=value was
                            // not specified but a default value was set in the Containerfile
                            // via `ARG key=value`, the default value can be used.
                            userArgs = append(builtinArgs, append(userArgs, headingArgs...)...)
                            baseWithArg, err := imagebuilder.ProcessWord(stageName, userArgs)
                            if err != nil {
                                return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", stageName, err)
                            }
                            logrus.Debugf("stage %d name: %q resolves to %q", stageIndex, stageName, baseWithArg)
                            stageName = baseWithArg
                            // If --from=<index>, convert the index to a name.
                            if index, err := strconv.Atoi(stageName); err == nil && index >= 0 && index < stageIndex {
                                stageName = stages[index].Name
                            }
                            // If the selected base is not an additional build
                            // context and is a valid stage, add it to the
                            // current stage's dependency tree.
                            if _, ok := b.additionalBuildContexts[stageName]; !ok {
                                if _, ok := dependencyMap[stageName]; ok {
                                    // update current stage's dependency info
                                    currentStageInfo := dependencyMap[stage.Name]
                                    currentStageInfo.Needs = append(currentStageInfo.Needs, stageName)
                                }
                            }
                        }
                    }
                case "RUN":
                    for _, flag := range child.Flags { // flags for this instruction
                        // We need to populate the dependency tree of stages:
                        // if the instruction uses `--mount` with a `from=`
                        // field that points to a stage, consider that stage
                        // in the dependency calculation.
                        if strings.HasPrefix(flag, "--mount=") && strings.Contains(flag, "from") {
                            mountFlags := strings.TrimPrefix(flag, "--mount=")
                            fields := strings.Split(mountFlags, ",")
                            for _, field := range fields {
                                if mountFrom, hasFrom := strings.CutPrefix(field, "from="); hasFrom {
                                    // Check whether this base is a stage; if so,
                                    // add it to the current stage's dependency
                                    // tree, but only if it is not an additional
                                    // build context.
                                    if _, ok := b.additionalBuildContexts[mountFrom]; !ok {
                                        // Treat from as a rootfs we need to preserve
                                        b.rootfsMap[mountFrom] = struct{}{}
                                        if _, ok := dependencyMap[mountFrom]; ok {
                                            // update current stage's dependency info
                                            currentStageInfo := dependencyMap[stage.Name]
                                            currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom)
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
            node = node.Next // next line
        }
        // The last stage is always the target stage.  Now that it has been
        // processed, calculate the dependency map of the stages so that we
        // can mark the stages which can be skipped.
        if stage.Position == (len(stages) - 1) {
            markDependencyStagesForTarget(dependencyMap, stage.Name)
        }
    }
    b.warnOnUnsetBuildArgs(stages, dependencyMap, b.args)

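    // Result carries the outcome of building a single stage back to the
    // goroutine that collects the per-stage results.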
type Result struct {
|
2023-10-09 16:29:52 +08:00
|
|
|
Index int
|
|
|
|
ImageID string
|
|
|
|
OnlyBaseImage bool
|
|
|
|
Ref reference.Canonical
|
|
|
|
Error error
|
2020-06-22 16:53:02 +08:00
|
|
|
}
|
|
|
|
|
imagebuildah: fix an attempt to write to a nil map
If the build for a single stage fails, we break out of the loop that's
iterating through all of the stages over in its own goroutine, and start
cleaning up after the stages that were already completed.
Because the function that launched that goroutine also calls its cleanup
function in non-error cases, the cleanup function sets the map that's
used to keep track of what needs to be cleaned up to `nil` after the
function finishes iterating through the map, so that we won't try to
clean up (a given thing that needs to be cleaned up) more than once.
Because the loop that's iterating through all of the stages is running
in its own goroutine, it doesn't stop when the function that started it
returns in error cases, so it would still attempt to build subsequent
stages. Have it check for cases where the map variable has already been
cleared, or if one of the stages that it's already run returned an
error. If the function that it calls to build the stage, using the map
variable as a parameter, is already running at that point, it'll have a
non-`nil` map, so it won't crash, but it might not be cleaned up
correctly, either.
If such a stage finishes, either successfully or with an error, the
goroutine would try to pass the result back to its parent(?) goroutine
over a channel that was no longer being read from, and it would stall,
never releasing the jobs semaphore. Because we started sharing that
semaphore across multiple-platform builds, builds for other platforms
would stall completely, and the whole build would stall. Make the
results channel into a buffered channel to allow it to not stall there.
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2021-09-23 04:56:55 +08:00
|
|
|
ch := make(chan Result, len(stages))
|
2020-06-22 16:53:02 +08:00
|
|
|
|
bud: teach --platform to take a list
Add a pkg/parse.PlatformsFromOptions() which understands a "variant"
value as an optional third value in an OS/ARCH[/VARIANT] argument value,
which accepts a comma-separated list of them, and which returns a list
of platforms.
Teach "from" and "pull" about the --platform option and add integration
tests for them, warning if --platform was given multiple values.
Add a define.BuildOptions.JobSemaphore which an imagebuildah executor
will use in preference to one that it might allocate for itself.
In main(), allocate a JobSemaphore if the number of jobs is not 0 (which
we treat as "unlimited", and continue to allow executors to do).
In addManifest(), take a lock on the manifest list's image ID so that we
don't overwrite changes that another thread might be making while we're
attempting to make changes to it. In main(), create an empty list if
the list doesn't already exist before we start down this path, so that
we don't get two threads trying to create that manifest list at the same
time later on. Two processes could still try to create the same list
twice, but it's an incremental improvement.
Finally, if we've been given multiple platforms to build for, run their
builds concurrently and gather up their results.
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2021-06-22 22:52:49 +08:00
|
|
|
if b.stagesSemaphore == nil {
|
2022-01-28 07:08:53 +08:00
|
|
|
b.stagesSemaphore = semaphore.NewWeighted(int64(len(stages)))
|
bud: teach --platform to take a list
Add a pkg/parse.PlatformsFromOptions() which understands a "variant"
value as an optional third value in an OS/ARCH[/VARIANT] argument value,
which accepts a comma-separated list of them, and which returns a list
of platforms.
Teach "from" and "pull" about the --platform option and add integration
tests for them, warning if --platform was given multiple values.
Add a define.BuildOptions.JobSemaphore which an imagebuildah executor
will use in preference to one that it might allocate for itself.
In main(), allocate a JobSemaphore if the number of jobs is not 0 (which
we treat as "unlimited", and continue to allow executors to do).
In addManifest(), take a lock on the manifest list's image ID so that we
don't overwrite changes that another thread might be making while we're
attempting to make changes to it. In main(), create an empty list if
the list doesn't already exist before we start down this path, so that
we don't get two threads trying to create that manifest list at the same
time later on. Two processes could still try to create the same list
twice, but it's an incremental improvement.
Finally, if we've been given multiple platforms to build for, run their
builds concurrently and gather up their results.
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2021-06-22 22:52:49 +08:00
|
|
|
}
|
2020-06-22 16:53:02 +08:00
|
|
|
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(len(stages))
|
|
|
|
|
|
|
|
go func() {
|
imagebuildah: fix an attempt to write to a nil map
If the build for a single stage fails, we break out of the loop that's
iterating through all of the stages over in its own goroutine, and start
cleaning up after the stages that were already completed.
Because the function that launched that goroutine also calls its cleanup
function in non-error cases, the cleanup function sets the map that's
used to keep track of what needs to be cleaned up to `nil` after the
function finishes iterating through the map, so that we won't try to
clean up (a given thing that needs to be cleaned up) more than once.
Because the loop that's iterating through all of the stages is running
in its own goroutine, it doesn't stop when the function that started it
returns in error cases, so it would still attempt to build subsequent
stages. Have it check for cases where the map variable has already been
cleared, or if one of the stages that it's already run returned an
error. If the function that it calls to build the stage, using the map
variable as a parameter, is already running at that point, it'll have a
non-`nil` map, so it won't crash, but it might not be cleaned up
correctly, either.
If such a stage finishes, either successfully or with an error, the
goroutine would try to pass the result back to its parent(?) goroutine
over a channel that was no longer being read from, and it would stall,
never releasing the jobs semaphore. Because we started sharing that
semaphore across multiple-platform builds, builds for other platforms
would stall completely, and the whole build would stall. Make the
results channel into a buffered channel to allow it to not stall there.
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2021-09-23 04:56:55 +08:00
|
|
|
cancel := false
|
2020-06-22 16:53:02 +08:00
|
|
|
for stageIndex := range stages {
|
|
|
|
index := stageIndex
|
2020-07-02 14:07:10 +08:00
|
|
|
// Acquire the semaphore before creating the goroutine so we are sure they
|
2020-06-22 16:53:02 +08:00
|
|
|
// run in the specified order.
|
|
|
|
if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
				cancel = true
				b.lastError = err
				ch <- Result{
					Index: index,
					Error: err,
				}
				wg.Done()
				continue
			}
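			// Take a snapshot of cleanupStages while holding the lock; the
			// goroutine below treats a nil map as a signal that cleanup has
			// already run and the stage should no longer be built.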
			b.stagesLock.Lock()
			cleanupStages := cleanupStages
			b.stagesLock.Unlock()
			go func() {
				defer b.stagesSemaphore.Release(1)
				defer wg.Done()
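				// If an earlier stage failed, or cleanup has already cleared
				// the map of things to clean up, don't build this stage; just
				// report that it was canceled.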
				if cancel || cleanupStages == nil {
					var err error
					if stages[index].Name != strconv.Itoa(index) {
						err = fmt.Errorf("not building stage %d: build canceled", index)
					} else {
						err = fmt.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
					}
					ch <- Result{
						Index: index,
						Error: err,
					}
					return
				}
				// Skip this stage if it is not needed by the target stage or by
				// any of the target's dependency stages, and `SkipUnusedStages`
				// is not set to `false`.
				if stageDependencyInfo, ok := dependencyMap[stages[index].Name]; ok {
					if !stageDependencyInfo.NeededByTarget && b.skipUnusedStages != types.OptionalBoolFalse {
						logrus.Debugf("Skipping stage with Name %q and index %d since its not needed by the target stage", stages[index].Name, index)
						ch <- Result{
							Index: index,
							Error: nil,
						}
						return
					}
				}
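				// Build the stage; cleanupStages is passed in so that what the
				// stage creates can be tracked for later cleanup.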
				stageID, stageRef, stageOnlyBaseImage, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
				if stageErr != nil {
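					// The stage failed: flag the build as canceled so stages
					// that haven't started yet are skipped, and pass the error
					// back.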
					cancel = true
					ch <- Result{
						Index:         index,
						Error:         stageErr,
						OnlyBaseImage: stageOnlyBaseImage,
					}
					return
				}
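
				// Send the stage's image ID and reference back to the reader
				// loop; the results channel is buffered, so this send won't
				// block forever if the reader has already bailed out because
				// another stage failed.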
				ch <- Result{
					Index:         index,
					ImageID:       stageID,
					Ref:           stageRef,
					OnlyBaseImage: stageOnlyBaseImage,
					Error:         nil,
				}
			}()
		}
	}()
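
	// Close the results channel once every stage goroutine has finished, so
	// that the reader loop below terminates.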
	go func() {
		wg.Wait()
		close(ch)
	}()

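	// Read results as stages finish; the first error aborts the build.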
	for r := range ch {
		stage := stages[r.Index]

		b.stagesLock.Lock()
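		// Record how the stage terminated under both its name and its numeric
		// position.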
		b.terminatedStage[stage.Name] = r.Error
		b.terminatedStage[strconv.Itoa(stage.Position)] = r.Error

		if r.Error != nil {
			b.stagesLock.Unlock()
			b.lastError = r.Error
			return "", nil, r.Error
		}

		// If this is an intermediate stage, make a note of the ID, so
		// that we can look it up later.
		if r.Index < len(stages)-1 && r.ImageID != "" {
			b.imageMap[stage.Name] = r.ImageID
			// We're not populating the cache with intermediate
			// images, so add this one to the list of images that
			// we'll remove later.
			// Only remove the intermediate image if `--layers` was not
			// specified and the stage was not just a base image (i.e., it
			// produced a distinct image).
			if !b.layers && !r.OnlyBaseImage {
				cleanupImages = append(cleanupImages, r.ImageID)
			}
		}
		if r.Index == len(stages)-1 {
			imageID = r.ImageID
			ref = r.Ref
		}
		b.stagesLock.Unlock()
	}
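
	// Warn about any build args that were supplied but never consumed by an
	// ARG instruction.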
	if len(b.unusedArgs) > 0 {
		unusedList := make([]string, 0, len(b.unusedArgs))
		for k := range b.unusedArgs {
			unusedList = append(unusedList, k)
		}
		slices.Sort(unusedList)
		fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList)
	}

	// Add additional tags and print image names recorded in storage
	if dest, err := b.resolveNameToImageRef(b.output); err == nil {
		switch dest.Transport().Name() {
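		// The output went to local storage: look the image up so we can attach
		// any additional tags and report the names it ended up with.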
		case storageTransport.Transport.Name():
			_, img, err := storageTransport.ResolveReference(dest)
			if err != nil {
				return imageID, ref, fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err)
			}
			if len(b.additionalTags) > 0 {
				if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
					return imageID, ref, fmt.Errorf("setting image names to %v: %w", append(img.Names, b.additionalTags...), err)
				}
				logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
			}
			// Report back to the caller the tags applied, if any.
			_, img, err = storageTransport.ResolveReference(dest)
			if err != nil {
				return imageID, ref, fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err)
			}
			for _, name := range img.Names {
				fmt.Fprintf(b.out, "Successfully tagged %s\n", name)
			}

		default:
			if len(b.additionalTags) > 0 {
				b.logger.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name())
			}
		}
	}
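
	// Clean up the intermediate containers and images we recorded along the
	// way; the cleanup function clears its bookkeeping when it finishes so
	// that nothing gets cleaned up twice.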
	if err := cleanup(); err != nil {
		return "", nil, err
	}
	logrus.Debugf("printing final image id %q", imageID)
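	// Write the final image ID either to the configured iidfile, if any, or
	// to our stdout.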
	if b.iidfile != "" {
		if err = os.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0o644); err != nil {
			return imageID, ref, fmt.Errorf("failed to write image ID to file %q: %w", b.iidfile, err)
		}
	} else {
		if _, err := stdout.Write([]byte(imageID + "\n")); err != nil {
			return imageID, ref, fmt.Errorf("failed to write image ID to stdout: %w", err)
		}
	}
	return imageID, ref, nil
}

// deleteSuccessfulIntermediateCtrs goes through the container IDs in each
// stage's containerIDs list and deletes the containers associated with those
// IDs.
func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
	var lastErr error
	for _, s := range b.stages {
		for _, ctr := range s.containerIDs {
			if err := b.store.DeleteContainer(ctr); err != nil {
				b.logger.Errorf("error deleting build container %q: %v\n", ctr, err)
				lastErr = err
			}
		}
		// The stages map includes some stages under multiple keys, so
		// clearing their lists after we process a given stage is
		// necessary to avoid triggering errors that would occur if we
		// tried to delete a given stage's containers multiple times.
		s.containerIDs = nil
	}
	return lastErr
}