Merge pull request #6380 from nalind/go1.24

Update to Go 1.24
openshift-merge-bot[bot] 2025-09-15 18:41:48 +00:00 committed by GitHub
commit 48ac5410cb
18 changed files with 64 additions and 67 deletions


@@ -35,7 +35,7 @@ env:
DEBIAN_NAME: "debian-13"
# Image identifiers
-IMAGE_SUFFIX: "c20250812t173301z-f42f41d13"
+IMAGE_SUFFIX: "c20250910t092246z-f42f41d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -124,7 +124,7 @@ vendor_task:
# Runs within Cirrus's "community cluster"
container:
-image: docker.io/library/golang:1.23.3
+image: docker.io/library/golang:1.24.0
cpu: 1
memory: 1


@@ -132,8 +132,8 @@ type Builder struct {
ImageHistoryComment string `json:"history-comment,omitempty"`
// Image metadata and runtime settings, in multiple formats.
-OCIv1 v1.Image `json:"ociv1,omitempty"`
-Docker docker.V2Image `json:"docker,omitempty"`
+OCIv1 v1.Image `json:"ociv1"`
+Docker docker.V2Image `json:"docker"`
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format.
DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
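
The omitempty options dropped from these struct-typed fields were no-ops: encoding/json only omits a field whose value is an empty value (false, 0, a nil pointer or interface, or an empty array, slice, map, or string), and a struct value such as v1.Image never qualifies, so the marshaled output does not change. Go 1.24 adds a separate omitzero tag option for callers who really do want zero-valued struct fields skipped. The same tag cleanup recurs in several structs later in this diff. A minimal sketch of the behavior, with made-up type names rather than anything from buildah:

package main

import (
    "encoding/json"
    "fmt"
)

type Inner struct {
    Name string `json:"name,omitempty"`
}

type Outer struct {
    // omitempty is ignored for struct-typed fields: a struct is never an
    // "empty value" to encoding/json, so both fields are always emitted.
    Tagged   Inner `json:"tagged,omitempty"`
    Untagged Inner `json:"untagged"`
}

func main() {
    out, _ := json.Marshal(Outer{})
    fmt.Println(string(out)) // {"tagged":{},"untagged":{}}
}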


@@ -277,8 +277,8 @@ func containerOutputHeader(truncate bool) {
func parseCtrFilter(filter string) (*containerFilterParams, error) {
params := new(containerFilterParams)
-filters := strings.Split(filter, ",")
-for _, param := range filters {
+filters := strings.SplitSeq(filter, ",")
+for param := range filters {
pair := strings.SplitN(param, "=", 2)
if len(pair) != 2 {
return nil, fmt.Errorf("incorrect filter value %q, should be of form filter=value", param)
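
The strings.Split calls in loops like this one become strings.SplitSeq, one of the iterator-returning helpers added in Go 1.24: instead of allocating a []string, it returns an iter.Seq[string] that yields each piece lazily, which is why the range loops throughout this diff also drop their index variable. A standalone sketch of the idiom, with an invented filter string:

package main

import (
    "fmt"
    "strings"
)

func main() {
    filter := "name=web,ancestor=alpine"
    // SplitSeq yields each comma-separated piece without building a slice.
    for param := range strings.SplitSeq(filter, ",") {
        pair := strings.SplitN(param, "=", 2)
        if len(pair) != 2 {
            fmt.Printf("incorrect filter value %q, should be of form filter=value\n", param)
            continue
        }
        fmt.Printf("%s = %s\n", pair[0], pair[1])
    }
}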


@@ -162,13 +162,13 @@ type request struct {
preservedDirectory string
Globs []string `json:",omitempty"` // used by stat, get
preservedGlobs []string
-StatOptions StatOptions `json:",omitempty"`
-GetOptions GetOptions `json:",omitempty"`
-PutOptions PutOptions `json:",omitempty"`
-MkdirOptions MkdirOptions `json:",omitempty"`
-RemoveOptions RemoveOptions `json:",omitempty"`
-EnsureOptions EnsureOptions `json:",omitempty"`
-ConditionalRemoveOptions ConditionalRemoveOptions `json:",omitempty"`
+StatOptions StatOptions
+GetOptions GetOptions
+PutOptions PutOptions
+MkdirOptions MkdirOptions
+RemoveOptions RemoveOptions
+EnsureOptions EnsureOptions
+ConditionalRemoveOptions ConditionalRemoveOptions
}
func (req *request) Excludes() []string {
@@ -248,15 +248,15 @@ func (req *request) GIDMap() []idtools.IDMap {
// Response encodes a single response.
type response struct {
-Error string `json:",omitempty"`
-Stat statResponse `json:",omitempty"`
-Eval evalResponse `json:",omitempty"`
-Get getResponse `json:",omitempty"`
-Put putResponse `json:",omitempty"`
-Mkdir mkdirResponse `json:",omitempty"`
-Remove removeResponse `json:",omitempty"`
-Ensure ensureResponse `json:",omitempty"`
-ConditionalRemove conditionalRemoveResponse `json:",omitempty"`
+Error string `json:",omitempty"`
+Stat statResponse
+Eval evalResponse
+Get getResponse
+Put putResponse
+Mkdir mkdirResponse
+Remove removeResponse
+Ensure ensureResponse
+ConditionalRemove conditionalRemoveResponse
}
// statResponse encodes a response for a single Stat request.
@@ -801,7 +801,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
}
loggedOutput := strings.TrimSuffix(errorBuffer.String(), "\n")
if len(loggedOutput) > 0 {
-for _, output := range strings.Split(loggedOutput, "\n") {
+for output := range strings.SplitSeq(loggedOutput, "\n") {
logrus.Debug(output)
}
}
@@ -1588,8 +1588,8 @@ func mapWithPrefixedKeysWithoutKeyPrefix[K any](m map[string]K, p string) map[st
}
cloned := make(map[string]K, len(m))
for k, v := range m {
-if strings.HasPrefix(k, p) {
-cloned[strings.TrimPrefix(k, p)] = v
+if after, ok := strings.CutPrefix(k, p); ok {
+cloned[after] = v
}
}
return cloned
@@ -1819,7 +1819,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
return fmt.Errorf("%q is not a subdirectory of %q: %w", directory, req.Root, err)
}
subdir := ""
-for _, component := range strings.Split(rel, string(os.PathSeparator)) {
+for component := range strings.SplitSeq(rel, string(os.PathSeparator)) {
subdir = filepath.Join(subdir, component)
path := filepath.Join(req.Root, subdir)
if err := os.Mkdir(path, 0o700); err == nil {
@@ -2187,7 +2187,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
}
func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
-errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) {
+errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) { //nolint:unparam
return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil
}
dirUID, dirGID := 0, 0
@@ -2219,7 +2219,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
subdir := ""
var created []string
-for _, component := range strings.Split(rel, string(os.PathSeparator)) {
+for component := range strings.SplitSeq(rel, string(os.PathSeparator)) {
subdir = filepath.Join(subdir, component)
path := filepath.Join(req.Root, subdir)
if err := os.Mkdir(path, 0o700); err == nil {
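
The HasPrefix-then-TrimPrefix pattern in this file gives way to strings.CutPrefix (available since Go 1.20), which performs the check and the trim in a single call and reports whether the prefix was actually present; the same substitution shows up in several files below. A simplified sketch of the key-prefix helper above, narrowed to map[string]string purely for illustration:

package main

import (
    "fmt"
    "strings"
)

// stripKeyPrefix keeps only the entries whose keys carry the given prefix,
// with that prefix removed. Names and types here are illustrative only.
func stripKeyPrefix(m map[string]string, prefix string) map[string]string {
    cloned := make(map[string]string, len(m))
    for k, v := range m {
        // CutPrefix replaces the HasPrefix + TrimPrefix pair, which scanned
        // the prefix twice and hid whether the trim actually matched.
        if after, ok := strings.CutPrefix(k, prefix); ok {
            cloned[after] = v
        }
    }
    return cloned
}

func main() {
    fmt.Println(stripKeyPrefix(map[string]string{"x.a": "1", "y.b": "2"}, "x."))
    // map[a:1]
}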


@@ -65,7 +65,7 @@ func Lgetxattrs(path string) (map[string]string, error) {
return nil, fmt.Errorf("unable to read list of attributes for %q: size would have been too big", path)
}
m := make(map[string]string)
-for _, attribute := range strings.Split(string(list), string('\000')) {
+for attribute := range strings.SplitSeq(string(list), string('\000')) {
if isRelevantXattr(attribute) {
attributeSize := initialXattrValueSize
var attributeValue []byte


@@ -124,7 +124,7 @@ type V1Compatibility struct {
Created time.Time `json:"created"`
ContainerConfig struct {
Cmd []string
-} `json:"container_config,omitempty"`
+} `json:"container_config"`
Author string `json:"author,omitempty"`
ThrowAway bool `json:"throwaway,omitempty"`
}
@@ -143,7 +143,7 @@ type V1Image struct {
// Container is the id of the container used to commit
Container string `json:"container,omitempty"`
// ContainerConfig is the configuration of the container that is committed into the image
-ContainerConfig Config `json:"container_config,omitempty"`
+ContainerConfig Config `json:"container_config"`
// DockerVersion specifies the version of Docker that was used to build the image
DockerVersion string `json:"docker_version,omitempty"`
// Author is the name of the author that was specified when committing the image

go.mod

@@ -2,7 +2,7 @@ module github.com/containers/buildah
// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates
-go 1.23.3
+go 1.24.0
require (
github.com/containerd/platforms v1.0.0-rc.1


@@ -836,12 +836,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
case "ADD", "COPY":
for _, flag := range child.Flags { // flags for this instruction
-if strings.HasPrefix(flag, "--from=") {
+if after, ok := strings.CutPrefix(flag, "--from="); ok {
// TODO: this didn't undergo variable and
// arg expansion, so if the previous stage
// was named using argument values, we might
// not record the right value here.
-rootfs := strings.TrimPrefix(flag, "--from=")
+rootfs := after
b.rootfsMap[rootfs] = struct{}{}
logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs)
// Populate dependency tree and check
@@ -885,8 +885,8 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
// dependency calculation.
if strings.HasPrefix(flag, "--mount=") && strings.Contains(flag, "from") {
mountFlags := strings.TrimPrefix(flag, "--mount=")
-fields := strings.Split(mountFlags, ",")
-for _, field := range fields {
+fields := strings.SplitSeq(mountFlags, ",")
+for field := range fields {
if mountFrom, hasFrom := strings.CutPrefix(field, "from="); hasFrom {
// Check if this base is a stage if yes
// add base to current stage's dependency tree
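
The stage-dependency scan now leans on the same two helpers in combination: CutPrefix peels "--mount=" off the flag, and SplitSeq iterates the comma-separated mount options without an intermediate slice. A self-contained sketch of that parsing chain; the function name and flag text are invented for illustration:

package main

import (
    "fmt"
    "strings"
)

// stageFromMountFlag extracts the "from=" source of a --mount flag, if any.
func stageFromMountFlag(flag string) (string, bool) {
    mountFlags, ok := strings.CutPrefix(flag, "--mount=")
    if !ok {
        return "", false
    }
    // Walk the comma-separated mount options without building a slice.
    for field := range strings.SplitSeq(mountFlags, ",") {
        if mountFrom, hasFrom := strings.CutPrefix(field, "from="); hasFrom {
            return mountFrom, true
        }
    }
    return "", false
}

func main() {
    fmt.Println(stageFromMountFlag("--mount=type=cache,from=builder,target=/var/cache"))
    // builder true
}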


@@ -1913,7 +1913,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
switch command {
case "ARG":
-for _, variable := range strings.Fields(node.Original) {
+for variable := range strings.FieldsSeq(node.Original) {
if variable != "ARG" {
s.argsFromContainerfile = append(s.argsFromContainerfile, variable)
}
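
strings.FieldsSeq is the Go 1.24 iterator counterpart of strings.Fields: it walks the whitespace-separated tokens of a line without allocating the intermediate slice. A small sketch of the ARG handling above, run on an invented instruction:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Every whitespace-separated token except the leading keyword is treated
    // as a declared build argument, mirroring the loop above.
    line := "ARG GOPROXY GOFLAGS=-mod=vendor"
    var args []string
    for variable := range strings.FieldsSeq(line) {
        if variable != "ARG" {
            args = append(args, variable)
        }
    }
    fmt.Println(args) // [GOPROXY GOFLAGS=-mod=vendor]
}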


@@ -183,11 +183,11 @@ func getHostDistributionInfo() map[string]string {
l := bufio.NewScanner(f)
for l.Scan() {
-if strings.HasPrefix(l.Text(), "ID=") {
-dist["Distribution"] = strings.TrimPrefix(l.Text(), "ID=")
+if after, ok := strings.CutPrefix(l.Text(), "ID="); ok {
+dist["Distribution"] = after
}
-if strings.HasPrefix(l.Text(), "VERSION_ID=") {
-dist["Version"] = strings.Trim(strings.TrimPrefix(l.Text(), "VERSION_ID="), "\"")
+if after, ok := strings.CutPrefix(l.Text(), "VERSION_ID="); ok {
+dist["Version"] = strings.Trim(after, "\"")
}
}
return dist
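
Both os-release keys are now matched with CutPrefix, so each scanner iteration tests for and strips the prefix in one step. A runnable sketch of the same pattern over an in-memory sample; the file contents here are made up:

package main

import (
    "bufio"
    "fmt"
    "strings"
)

func main() {
    sample := "ID=fedora\nVERSION_ID=\"42\"\nNAME=Fedora Linux\n"
    dist := map[string]string{}
    scanner := bufio.NewScanner(strings.NewReader(sample))
    for scanner.Scan() {
        if after, ok := strings.CutPrefix(scanner.Text(), "ID="); ok {
            dist["Distribution"] = after
        }
        if after, ok := strings.CutPrefix(scanner.Text(), "VERSION_ID="); ok {
            dist["Version"] = strings.Trim(after, "\"")
        }
    }
    fmt.Println(dist) // map[Distribution:fedora Version:42]
}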


@@ -2,6 +2,7 @@ package config
import (
"fmt"
+"maps"
"os"
"slices"
"strings"
@@ -136,9 +136,7 @@ func OverrideOCI(oconfig *v1.ImageConfig, overrideChanges []string, overrideConf
if oconfig.Labels == nil {
oconfig.Labels = make(map[string]string)
}
-for k, v := range overrideConfig.Labels {
-oconfig.Labels[k] = v
-}
+maps.Copy(oconfig.Labels, overrideConfig.Labels)
}
oconfig.StopSignal = overrideConfig.StopSignal
}
@@ -206,9 +206,7 @@ func OverrideDocker(dconfig *docker.Config, overrideChanges []string, overrideCo
if dconfig.Labels == nil {
dconfig.Labels = make(map[string]string)
}
-for k, v := range overrideConfig.Labels {
-dconfig.Labels[k] = v
-}
+maps.Copy(dconfig.Labels, overrideConfig.Labels)
}
dconfig.StopSignal = overrideConfig.StopSignal
dconfig.StopTimeout = overrideConfig.StopTimeout
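
maps.Copy, from the maps package that joined the standard library in Go 1.21, replaces both hand-written label-copy loops; like the loops it replaces, it overwrites destination keys that already exist. A short sketch with invented label values:

package main

import (
    "fmt"
    "maps"
)

func main() {
    labels := map[string]string{"maintainer": "someone", "stage": "base"}
    overrides := map[string]string{"stage": "final", "version": "1.0"}
    // Insert every key/value from overrides into labels, overwriting
    // existing keys, exactly what the removed range loop did.
    maps.Copy(labels, overrides)
    fmt.Println(labels) // map[maintainer:someone stage:final version:1.0]
}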


@@ -543,7 +543,7 @@ func slop(size int64, slop string) int64 {
if slop == "" {
return size * 5 / 4
}
-for _, factor := range strings.Split(slop, "+") {
+for factor := range strings.SplitSeq(slop, "+") {
factor = strings.TrimSpace(factor)
if factor == "" {
continue


@@ -240,8 +240,8 @@ func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string)
scanner := bufio.NewScanner(&stdout)
for scanner.Scan() {
line := scanner.Text()
-if strings.HasPrefix(line, prefix+":") {
-return strings.TrimSpace(strings.TrimPrefix(line, prefix+":")), nil
+if after, ok := strings.CutPrefix(line, prefix+":"); ok {
+return strings.TrimSpace(after), nil
}
}
return "", fmt.Errorf("generating measurement: no line starting with %q found in output from krunfw_measurement", prefix+":")


@@ -202,7 +202,7 @@ func Merge(mergeStrategy define.SBOMMergeStrategy, inputOutputSBOM, inputSBOM, o
Dependencies []string `json:"dependencies,omitempty"`
}
type purlDocument struct {
-ImageContents purlImageContents `json:"image_contents,omitempty"`
+ImageContents purlImageContents `json:"image_contents"`
}
purls := []string{}
seenPurls := make(map[string]struct{})


@@ -474,7 +474,7 @@ func readBuildArgFile(buildargfile string, args map[string]string) error {
if err != nil {
return err
}
-for _, arg := range strings.Split(string(argfile), "\n") {
+for arg := range strings.SplitSeq(string(argfile), "\n") {
if len(arg) == 0 || arg[0] == '#' {
continue
}


@@ -733,7 +733,7 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
isStdout := false
typeSelected := ""
pathSelected := ""
-for _, option := range strings.Split(buildOutput, ",") {
+for option := range strings.SplitSeq(buildOutput, ",") {
key, value, found := strings.Cut(option, "=")
if !found {
return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput)
@@ -789,7 +789,7 @@ func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOpti
TempDir: GetTempDir(),
}
defaults := options
-for _, option := range strings.Split(arg, ",") {
+for option := range strings.SplitSeq(arg, ",") {
var err error
switch {
case strings.HasPrefix(option, "type="):
@@ -936,7 +936,7 @@ func GetAutoOptions(base string) (*storageTypes.AutoUserNsOptions, error) {
if len(parts) == 1 {
return &options, nil
}
-for _, o := range strings.Split(parts[1], ",") {
+for o := range strings.SplitSeq(parts[1], ",") {
v := strings.SplitN(o, "=", 2)
if len(v) != 2 {
return nil, fmt.Errorf("invalid option specified: %q", o)


@@ -672,10 +672,10 @@ func buildUsingDocker(ctx context.Context, t *testing.T, client *docker.Client,
// read the Dockerfile so that we can pull base images
dockerfileContent, err := os.ReadFile(dockerfileName)
require.NoErrorf(t, err, "reading dockerfile %q", dockerfileName)
-for _, line := range strings.Split(string(dockerfileContent), "\n") {
+for line := range strings.SplitSeq(string(dockerfileContent), "\n") {
line = strings.TrimSpace(line)
-if strings.HasPrefix(line, "# syntax=") {
-pullImageIfMissing(t, client, strings.TrimPrefix(line, "# syntax="))
+if after, ok := strings.CutPrefix(line, "# syntax="); ok {
+pullImageIfMissing(t, client, after)
}
}
parsed, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfileContent))
@@ -880,7 +880,7 @@ func buildPost(t *testing.T, test testCase, err error, buildTool, outputString,
// FSTree holds the information we have about an image's filesystem
type FSTree struct {
Layers []Layer `json:"layers,omitempty"`
-Tree FSEntry `json:"tree,omitempty"`
+Tree FSEntry `json:"tree"`
}
// Layer keeps track of the digests and contents of a layer blob
@@ -900,7 +900,7 @@ type FSHeader struct {
Mode int64 `json:"mode,omitempty"`
UID int `json:"uid"`
GID int `json:"gid"`
-ModTime time.Time `json:"mtime,omitempty"`
+ModTime time.Time `json:"mtime"`
Devmajor int64 `json:"devmajor,omitempty"`
Devminor int64 `json:"devminor,omitempty"`
Xattrs map[string]string `json:"xattrs,omitempty"`
@@ -1131,8 +1131,8 @@ func applyLayerToFSTree(t *testing.T, layer *Layer, root *FSEntry) {
}
// if the item is a whiteout, strip the "this is a whiteout
// entry" prefix and remove the item it names
-if strings.HasPrefix(base, ".wh.") {
-delete(dirEntry.Children, strings.TrimPrefix(base, ".wh."))
+if after, ok := strings.CutPrefix(base, ".wh."); ok {
+delete(dirEntry.Children, after)
continue
}
// if the item already exists, make sure we don't get confused
@@ -1281,8 +1281,8 @@ func compareJSON(a, b map[string]any, skip []string) (missKeys, leftKeys, diffKe
var nextSkip []string
prefix := k + ":"
for _, s := range skip {
-if strings.HasPrefix(s, prefix) {
-nextSkip = append(nextSkip, strings.TrimPrefix(s, prefix))
+if after, ok0 := strings.CutPrefix(s, prefix); ok0 {
+nextSkip = append(nextSkip, after)
}
}
submiss, subleft, subdiff, ok := compareJSON(v.(map[string]any), vb.(map[string]any), nextSkip)


@@ -981,10 +981,10 @@ _EOF
run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
cid=$output
-for mask in /proc/acpi /proc/kcore /proc/keys /proc/latency_stats /proc/sched_debug /proc/scsi /proc/timer_list /proc/timer_stats /sys/devices/virtual/powercap /sys/firmware /sys/fs/selinux; do
+for mask in /proc/acpi /proc/interrupts /proc/kcore /proc/keys /proc/latency_stats /proc/sched_debug /proc/scsi /proc/timer_list /proc/timer_stats /sys/devices/virtual/powercap /sys/firmware /sys/fs/selinux; do
if test -d $mask; then
-run_buildah run $cid ls $mask
-expect_output "" "Directories should be empty"
+run_buildah run $cid sh -c "echo $mask/*" # globbing will fail whether it's simply unreadable, or readable but empty
+expect_output "$mask/*" "Directories should be empty"
fi
if test -f $mask; then
run_buildah run $cid cat $mask