Merge pull request #6380 from nalind/go1.24

Update to Go 1.24
This commit is contained in:
openshift-merge-bot[bot] 2025-09-15 18:41:48 +00:00 committed by GitHub
commit 48ac5410cb
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 64 additions and 67 deletions

View File

@@ -35,7 +35,7 @@ env:
DEBIAN_NAME: "debian-13" DEBIAN_NAME: "debian-13"
# Image identifiers # Image identifiers
IMAGE_SUFFIX: "c20250812t173301z-f42f41d13" IMAGE_SUFFIX: "c20250910t092246z-f42f41d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -124,7 +124,7 @@ vendor_task:
# Runs within Cirrus's "community cluster" # Runs within Cirrus's "community cluster"
container: container:
image: docker.io/library/golang:1.23.3 image: docker.io/library/golang:1.24.0
cpu: 1 cpu: 1
memory: 1 memory: 1

View File

@@ -132,8 +132,8 @@ type Builder struct {
ImageHistoryComment string `json:"history-comment,omitempty"` ImageHistoryComment string `json:"history-comment,omitempty"`
// Image metadata and runtime settings, in multiple formats. // Image metadata and runtime settings, in multiple formats.
OCIv1 v1.Image `json:"ociv1,omitempty"` OCIv1 v1.Image `json:"ociv1"`
Docker docker.V2Image `json:"docker,omitempty"` Docker docker.V2Image `json:"docker"`
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format. // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format.
DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"` DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`

View File

@@ -277,8 +277,8 @@ func containerOutputHeader(truncate bool) {
func parseCtrFilter(filter string) (*containerFilterParams, error) { func parseCtrFilter(filter string) (*containerFilterParams, error) {
params := new(containerFilterParams) params := new(containerFilterParams)
filters := strings.Split(filter, ",") filters := strings.SplitSeq(filter, ",")
for _, param := range filters { for param := range filters {
pair := strings.SplitN(param, "=", 2) pair := strings.SplitN(param, "=", 2)
if len(pair) != 2 { if len(pair) != 2 {
return nil, fmt.Errorf("incorrect filter value %q, should be of form filter=value", param) return nil, fmt.Errorf("incorrect filter value %q, should be of form filter=value", param)

View File

@@ -162,13 +162,13 @@ type request struct {
preservedDirectory string preservedDirectory string
Globs []string `json:",omitempty"` // used by stat, get Globs []string `json:",omitempty"` // used by stat, get
preservedGlobs []string preservedGlobs []string
StatOptions StatOptions `json:",omitempty"` StatOptions StatOptions
GetOptions GetOptions `json:",omitempty"` GetOptions GetOptions
PutOptions PutOptions `json:",omitempty"` PutOptions PutOptions
MkdirOptions MkdirOptions `json:",omitempty"` MkdirOptions MkdirOptions
RemoveOptions RemoveOptions `json:",omitempty"` RemoveOptions RemoveOptions
EnsureOptions EnsureOptions `json:",omitempty"` EnsureOptions EnsureOptions
ConditionalRemoveOptions ConditionalRemoveOptions `json:",omitempty"` ConditionalRemoveOptions ConditionalRemoveOptions
} }
func (req *request) Excludes() []string { func (req *request) Excludes() []string {
@@ -249,14 +249,14 @@ func (req *request) GIDMap() []idtools.IDMap {
// Response encodes a single response. // Response encodes a single response.
type response struct { type response struct {
Error string `json:",omitempty"` Error string `json:",omitempty"`
Stat statResponse `json:",omitempty"` Stat statResponse
Eval evalResponse `json:",omitempty"` Eval evalResponse
Get getResponse `json:",omitempty"` Get getResponse
Put putResponse `json:",omitempty"` Put putResponse
Mkdir mkdirResponse `json:",omitempty"` Mkdir mkdirResponse
Remove removeResponse `json:",omitempty"` Remove removeResponse
Ensure ensureResponse `json:",omitempty"` Ensure ensureResponse
ConditionalRemove conditionalRemoveResponse `json:",omitempty"` ConditionalRemove conditionalRemoveResponse
} }
// statResponse encodes a response for a single Stat request. // statResponse encodes a response for a single Stat request.
@@ -801,7 +801,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
} }
loggedOutput := strings.TrimSuffix(errorBuffer.String(), "\n") loggedOutput := strings.TrimSuffix(errorBuffer.String(), "\n")
if len(loggedOutput) > 0 { if len(loggedOutput) > 0 {
for _, output := range strings.Split(loggedOutput, "\n") { for output := range strings.SplitSeq(loggedOutput, "\n") {
logrus.Debug(output) logrus.Debug(output)
} }
} }
@@ -1588,8 +1588,8 @@ func mapWithPrefixedKeysWithoutKeyPrefix[K any](m map[string]K, p string) map[st
} }
cloned := make(map[string]K, len(m)) cloned := make(map[string]K, len(m))
for k, v := range m { for k, v := range m {
if strings.HasPrefix(k, p) { if after, ok := strings.CutPrefix(k, p); ok {
cloned[strings.TrimPrefix(k, p)] = v cloned[after] = v
} }
} }
return cloned return cloned
@@ -1819,7 +1819,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
return fmt.Errorf("%q is not a subdirectory of %q: %w", directory, req.Root, err) return fmt.Errorf("%q is not a subdirectory of %q: %w", directory, req.Root, err)
} }
subdir := "" subdir := ""
for _, component := range strings.Split(rel, string(os.PathSeparator)) { for component := range strings.SplitSeq(rel, string(os.PathSeparator)) {
subdir = filepath.Join(subdir, component) subdir = filepath.Join(subdir, component)
path := filepath.Join(req.Root, subdir) path := filepath.Join(req.Root, subdir)
if err := os.Mkdir(path, 0o700); err == nil { if err := os.Mkdir(path, 0o700); err == nil {
@@ -2187,7 +2187,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
} }
func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) { func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) { errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) { //nolint:unparam
return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil
} }
dirUID, dirGID := 0, 0 dirUID, dirGID := 0, 0
@@ -2219,7 +2219,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
subdir := "" subdir := ""
var created []string var created []string
for _, component := range strings.Split(rel, string(os.PathSeparator)) { for component := range strings.SplitSeq(rel, string(os.PathSeparator)) {
subdir = filepath.Join(subdir, component) subdir = filepath.Join(subdir, component)
path := filepath.Join(req.Root, subdir) path := filepath.Join(req.Root, subdir)
if err := os.Mkdir(path, 0o700); err == nil { if err := os.Mkdir(path, 0o700); err == nil {

View File

@@ -65,7 +65,7 @@ func Lgetxattrs(path string) (map[string]string, error) {
return nil, fmt.Errorf("unable to read list of attributes for %q: size would have been too big", path) return nil, fmt.Errorf("unable to read list of attributes for %q: size would have been too big", path)
} }
m := make(map[string]string) m := make(map[string]string)
for _, attribute := range strings.Split(string(list), string('\000')) { for attribute := range strings.SplitSeq(string(list), string('\000')) {
if isRelevantXattr(attribute) { if isRelevantXattr(attribute) {
attributeSize := initialXattrValueSize attributeSize := initialXattrValueSize
var attributeValue []byte var attributeValue []byte

View File

@@ -124,7 +124,7 @@ type V1Compatibility struct {
Created time.Time `json:"created"` Created time.Time `json:"created"`
ContainerConfig struct { ContainerConfig struct {
Cmd []string Cmd []string
} `json:"container_config,omitempty"` } `json:"container_config"`
Author string `json:"author,omitempty"` Author string `json:"author,omitempty"`
ThrowAway bool `json:"throwaway,omitempty"` ThrowAway bool `json:"throwaway,omitempty"`
} }
@@ -143,7 +143,7 @@ type V1Image struct {
// Container is the id of the container used to commit // Container is the id of the container used to commit
Container string `json:"container,omitempty"` Container string `json:"container,omitempty"`
// ContainerConfig is the configuration of the container that is committed into the image // ContainerConfig is the configuration of the container that is committed into the image
ContainerConfig Config `json:"container_config,omitempty"` ContainerConfig Config `json:"container_config"`
// DockerVersion specifies the version of Docker that was used to build the image // DockerVersion specifies the version of Docker that was used to build the image
DockerVersion string `json:"docker_version,omitempty"` DockerVersion string `json:"docker_version,omitempty"`
// Author is the name of the author that was specified when committing the image // Author is the name of the author that was specified when committing the image

2
go.mod
View File

@@ -2,7 +2,7 @@ module github.com/containers/buildah
// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates // Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates
go 1.23.3 go 1.24.0
require ( require (
github.com/containerd/platforms v1.0.0-rc.1 github.com/containerd/platforms v1.0.0-rc.1

View File

@@ -836,12 +836,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
} }
case "ADD", "COPY": case "ADD", "COPY":
for _, flag := range child.Flags { // flags for this instruction for _, flag := range child.Flags { // flags for this instruction
if strings.HasPrefix(flag, "--from=") { if after, ok := strings.CutPrefix(flag, "--from="); ok {
// TODO: this didn't undergo variable and // TODO: this didn't undergo variable and
// arg expansion, so if the previous stage // arg expansion, so if the previous stage
// was named using argument values, we might // was named using argument values, we might
// not record the right value here. // not record the right value here.
rootfs := strings.TrimPrefix(flag, "--from=") rootfs := after
b.rootfsMap[rootfs] = struct{}{} b.rootfsMap[rootfs] = struct{}{}
logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs) logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs)
// Populate dependency tree and check // Populate dependency tree and check
@@ -885,8 +885,8 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
// dependency calculation. // dependency calculation.
if strings.HasPrefix(flag, "--mount=") && strings.Contains(flag, "from") { if strings.HasPrefix(flag, "--mount=") && strings.Contains(flag, "from") {
mountFlags := strings.TrimPrefix(flag, "--mount=") mountFlags := strings.TrimPrefix(flag, "--mount=")
fields := strings.Split(mountFlags, ",") fields := strings.SplitSeq(mountFlags, ",")
for _, field := range fields { for field := range fields {
if mountFrom, hasFrom := strings.CutPrefix(field, "from="); hasFrom { if mountFrom, hasFrom := strings.CutPrefix(field, "from="); hasFrom {
// Check if this base is a stage if yes // Check if this base is a stage if yes
// add base to current stage's dependency tree // add base to current stage's dependency tree

View File

@@ -1913,7 +1913,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
switch command { switch command {
case "ARG": case "ARG":
for _, variable := range strings.Fields(node.Original) { for variable := range strings.FieldsSeq(node.Original) {
if variable != "ARG" { if variable != "ARG" {
s.argsFromContainerfile = append(s.argsFromContainerfile, variable) s.argsFromContainerfile = append(s.argsFromContainerfile, variable)
} }

View File

@@ -183,11 +183,11 @@ func getHostDistributionInfo() map[string]string {
l := bufio.NewScanner(f) l := bufio.NewScanner(f)
for l.Scan() { for l.Scan() {
if strings.HasPrefix(l.Text(), "ID=") { if after, ok := strings.CutPrefix(l.Text(), "ID="); ok {
dist["Distribution"] = strings.TrimPrefix(l.Text(), "ID=") dist["Distribution"] = after
} }
if strings.HasPrefix(l.Text(), "VERSION_ID=") { if after, ok := strings.CutPrefix(l.Text(), "VERSION_ID="); ok {
dist["Version"] = strings.Trim(strings.TrimPrefix(l.Text(), "VERSION_ID="), "\"") dist["Version"] = strings.Trim(after, "\"")
} }
} }
return dist return dist

View File

@@ -2,6 +2,7 @@ package config
import ( import (
"fmt" "fmt"
"maps"
"os" "os"
"slices" "slices"
"strings" "strings"
@@ -136,9 +137,7 @@ func OverrideOCI(oconfig *v1.ImageConfig, overrideChanges []string, overrideConf
if oconfig.Labels == nil { if oconfig.Labels == nil {
oconfig.Labels = make(map[string]string) oconfig.Labels = make(map[string]string)
} }
for k, v := range overrideConfig.Labels { maps.Copy(oconfig.Labels, overrideConfig.Labels)
oconfig.Labels[k] = v
}
} }
oconfig.StopSignal = overrideConfig.StopSignal oconfig.StopSignal = overrideConfig.StopSignal
} }
@@ -206,9 +205,7 @@ func OverrideDocker(dconfig *docker.Config, overrideChanges []string, overrideCo
if dconfig.Labels == nil { if dconfig.Labels == nil {
dconfig.Labels = make(map[string]string) dconfig.Labels = make(map[string]string)
} }
for k, v := range overrideConfig.Labels { maps.Copy(dconfig.Labels, overrideConfig.Labels)
dconfig.Labels[k] = v
}
} }
dconfig.StopSignal = overrideConfig.StopSignal dconfig.StopSignal = overrideConfig.StopSignal
dconfig.StopTimeout = overrideConfig.StopTimeout dconfig.StopTimeout = overrideConfig.StopTimeout

View File

@@ -543,7 +543,7 @@ func slop(size int64, slop string) int64 {
if slop == "" { if slop == "" {
return size * 5 / 4 return size * 5 / 4
} }
for _, factor := range strings.Split(slop, "+") { for factor := range strings.SplitSeq(slop, "+") {
factor = strings.TrimSpace(factor) factor = strings.TrimSpace(factor)
if factor == "" { if factor == "" {
continue continue

View File

@@ -240,8 +240,8 @@ func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string)
scanner := bufio.NewScanner(&stdout) scanner := bufio.NewScanner(&stdout)
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() line := scanner.Text()
if strings.HasPrefix(line, prefix+":") { if after, ok := strings.CutPrefix(line, prefix+":"); ok {
return strings.TrimSpace(strings.TrimPrefix(line, prefix+":")), nil return strings.TrimSpace(after), nil
} }
} }
return "", fmt.Errorf("generating measurement: no line starting with %q found in output from krunfw_measurement", prefix+":") return "", fmt.Errorf("generating measurement: no line starting with %q found in output from krunfw_measurement", prefix+":")

View File

@@ -202,7 +202,7 @@ func Merge(mergeStrategy define.SBOMMergeStrategy, inputOutputSBOM, inputSBOM, o
Dependencies []string `json:"dependencies,omitempty"` Dependencies []string `json:"dependencies,omitempty"`
} }
type purlDocument struct { type purlDocument struct {
ImageContents purlImageContents `json:"image_contents,omitempty"` ImageContents purlImageContents `json:"image_contents"`
} }
purls := []string{} purls := []string{}
seenPurls := make(map[string]struct{}) seenPurls := make(map[string]struct{})

View File

@@ -474,7 +474,7 @@ func readBuildArgFile(buildargfile string, args map[string]string) error {
if err != nil { if err != nil {
return err return err
} }
for _, arg := range strings.Split(string(argfile), "\n") { for arg := range strings.SplitSeq(string(argfile), "\n") {
if len(arg) == 0 || arg[0] == '#' { if len(arg) == 0 || arg[0] == '#' {
continue continue
} }

View File

@@ -733,7 +733,7 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
isStdout := false isStdout := false
typeSelected := "" typeSelected := ""
pathSelected := "" pathSelected := ""
for _, option := range strings.Split(buildOutput, ",") { for option := range strings.SplitSeq(buildOutput, ",") {
key, value, found := strings.Cut(option, "=") key, value, found := strings.Cut(option, "=")
if !found { if !found {
return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput) return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput)
@@ -789,7 +789,7 @@ func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOpti
TempDir: GetTempDir(), TempDir: GetTempDir(),
} }
defaults := options defaults := options
for _, option := range strings.Split(arg, ",") { for option := range strings.SplitSeq(arg, ",") {
var err error var err error
switch { switch {
case strings.HasPrefix(option, "type="): case strings.HasPrefix(option, "type="):
@@ -936,7 +936,7 @@ func GetAutoOptions(base string) (*storageTypes.AutoUserNsOptions, error) {
if len(parts) == 1 { if len(parts) == 1 {
return &options, nil return &options, nil
} }
for _, o := range strings.Split(parts[1], ",") { for o := range strings.SplitSeq(parts[1], ",") {
v := strings.SplitN(o, "=", 2) v := strings.SplitN(o, "=", 2)
if len(v) != 2 { if len(v) != 2 {
return nil, fmt.Errorf("invalid option specified: %q", o) return nil, fmt.Errorf("invalid option specified: %q", o)

View File

@@ -672,10 +672,10 @@ func buildUsingDocker(ctx context.Context, t *testing.T, client *docker.Client,
// read the Dockerfile so that we can pull base images // read the Dockerfile so that we can pull base images
dockerfileContent, err := os.ReadFile(dockerfileName) dockerfileContent, err := os.ReadFile(dockerfileName)
require.NoErrorf(t, err, "reading dockerfile %q", dockerfileName) require.NoErrorf(t, err, "reading dockerfile %q", dockerfileName)
for _, line := range strings.Split(string(dockerfileContent), "\n") { for line := range strings.SplitSeq(string(dockerfileContent), "\n") {
line = strings.TrimSpace(line) line = strings.TrimSpace(line)
if strings.HasPrefix(line, "# syntax=") { if after, ok := strings.CutPrefix(line, "# syntax="); ok {
pullImageIfMissing(t, client, strings.TrimPrefix(line, "# syntax=")) pullImageIfMissing(t, client, after)
} }
} }
parsed, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfileContent)) parsed, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfileContent))
@@ -880,7 +880,7 @@ func buildPost(t *testing.T, test testCase, err error, buildTool, outputString,
// FSTree holds the information we have about an image's filesystem // FSTree holds the information we have about an image's filesystem
type FSTree struct { type FSTree struct {
Layers []Layer `json:"layers,omitempty"` Layers []Layer `json:"layers,omitempty"`
Tree FSEntry `json:"tree,omitempty"` Tree FSEntry `json:"tree"`
} }
// Layer keeps track of the digests and contents of a layer blob // Layer keeps track of the digests and contents of a layer blob
@@ -900,7 +900,7 @@ type FSHeader struct {
Mode int64 `json:"mode,omitempty"` Mode int64 `json:"mode,omitempty"`
UID int `json:"uid"` UID int `json:"uid"`
GID int `json:"gid"` GID int `json:"gid"`
ModTime time.Time `json:"mtime,omitempty"` ModTime time.Time `json:"mtime"`
Devmajor int64 `json:"devmajor,omitempty"` Devmajor int64 `json:"devmajor,omitempty"`
Devminor int64 `json:"devminor,omitempty"` Devminor int64 `json:"devminor,omitempty"`
Xattrs map[string]string `json:"xattrs,omitempty"` Xattrs map[string]string `json:"xattrs,omitempty"`
@@ -1131,8 +1131,8 @@ func applyLayerToFSTree(t *testing.T, layer *Layer, root *FSEntry) {
} }
// if the item is a whiteout, strip the "this is a whiteout // if the item is a whiteout, strip the "this is a whiteout
// entry" prefix and remove the item it names // entry" prefix and remove the item it names
if strings.HasPrefix(base, ".wh.") { if after, ok := strings.CutPrefix(base, ".wh."); ok {
delete(dirEntry.Children, strings.TrimPrefix(base, ".wh.")) delete(dirEntry.Children, after)
continue continue
} }
// if the item already exists, make sure we don't get confused // if the item already exists, make sure we don't get confused
@@ -1281,8 +1281,8 @@ func compareJSON(a, b map[string]any, skip []string) (missKeys, leftKeys, diffKe
var nextSkip []string var nextSkip []string
prefix := k + ":" prefix := k + ":"
for _, s := range skip { for _, s := range skip {
if strings.HasPrefix(s, prefix) { if after, ok0 := strings.CutPrefix(s, prefix); ok0 {
nextSkip = append(nextSkip, strings.TrimPrefix(s, prefix)) nextSkip = append(nextSkip, after)
} }
} }
submiss, subleft, subdiff, ok := compareJSON(v.(map[string]any), vb.(map[string]any), nextSkip) submiss, subleft, subdiff, ok := compareJSON(v.(map[string]any), vb.(map[string]any), nextSkip)

View File

@@ -981,10 +981,10 @@ _EOF
run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
cid=$output cid=$output
for mask in /proc/acpi /proc/kcore /proc/keys /proc/latency_stats /proc/sched_debug /proc/scsi /proc/timer_list /proc/timer_stats /sys/devices/virtual/powercap /sys/firmware /sys/fs/selinux; do for mask in /proc/acpi /proc/interrupts /proc/kcore /proc/keys /proc/latency_stats /proc/sched_debug /proc/scsi /proc/timer_list /proc/timer_stats /sys/devices/virtual/powercap /sys/firmware /sys/fs/selinux; do
if test -d $mask; then if test -d $mask; then
run_buildah run $cid ls $mask run_buildah run $cid sh -c "echo $mask/*" # globbing will fail whether it's simply unreadable, or readable but empty
expect_output "" "Directories should be empty" expect_output "$mask/*" "Directories should be empty"
fi fi
if test -f $mask; then if test -f $mask; then
run_buildah run $cid cat $mask run_buildah run $cid cat $mask