vendor: bump c/storage to v1.46.2-0.20230526114421-55ee2d19292f

[NO NEW TESTS NEEDED]

Signed-off-by: Aditya R <arajan@redhat.com>
Aditya R 2023-05-26 18:05:28 +05:30
parent 5e30250039
commit 9cd28db91e
25 changed files with 170 additions and 183 deletions

go.mod

@@ -8,7 +8,7 @@ require (
 	github.com/containers/common v0.53.1-0.20230516065732-82045748b3e3
 	github.com/containers/image/v5 v5.25.1-0.20230511204805-94ab8dee62ea
 	github.com/containers/ocicrypt v1.1.7
-	github.com/containers/storage v1.46.2-0.20230508110141-51c23d59f8f3
+	github.com/containers/storage v1.46.2-0.20230526114421-55ee2d19292f
 	github.com/cyphar/filepath-securejoin v0.2.3
 	github.com/docker/distribution v2.8.2+incompatible
 	github.com/docker/docker v23.0.6+incompatible
@@ -90,7 +90,7 @@ require (
 	github.com/manifoldco/promptui v0.9.0 // indirect
 	github.com/mattn/go-runewidth v0.0.14 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
-	github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
+	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/patternmatcher v0.5.0 // indirect
 	github.com/moby/sys/mountinfo v0.6.2 // indirect

go.sum

@@ -56,8 +56,8 @@ github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYgle
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U=
 github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8NscCYRawuDNtw=
-github.com/containers/storage v1.46.2-0.20230508110141-51c23d59f8f3 h1:xEKpYtKcKezQICn+BGBbGa3HnA46z+tzGv8ZuxtQOTk=
-github.com/containers/storage v1.46.2-0.20230508110141-51c23d59f8f3/go.mod h1:sbKXsEKeGkAk9J0PSicUadeA7J+49rmPODDuUVEqAcM=
+github.com/containers/storage v1.46.2-0.20230526114421-55ee2d19292f h1:cOlPIrSViV8KpymDfIJZtdQYzjlvfJs33mSVCGSBkRM=
+github.com/containers/storage v1.46.2-0.20230526114421-55ee2d19292f/go.mod h1:jvBDv7Bu1mpbQ+tD5XgBN+tCgjnTWu3dD4ZZbdUG4GI=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
@@ -263,8 +263,8 @@ github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lL
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
 github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14=
-github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
+github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
+github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
 github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=

(file path not shown)

@@ -23,7 +23,7 @@ env:
     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
     # VM Image built in containers/automation_images
-    IMAGE_SUFFIX: "c20230426t140447z-f38f37d12"
+    IMAGE_SUFFIX: "c20230517t144652z-f38f37d12"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -56,7 +56,6 @@ gce_instance:
 linux_testing: &linux_testing
     depends_on:
         - lint
-    only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
     gce_instance: # Only need to specify differences from defaults (above)
        image_name: "${VM_IMAGE}"
@@ -129,7 +128,10 @@ lint_task:
     build_script: |
         apt-get update
         apt-get install -y libbtrfs-dev libdevmapper-dev
-    test_script: make TAGS=regex_precompile local-validate && make lint && make clean
+    test_script: |
+        make TAGS=regex_precompile local-validate
+        make lint
+        make clean
 # Update metadata on VM images referenced by this repository state
@@ -167,7 +169,7 @@ vendor_task:
 cross_task:
     container:
-        image: golang:1.18
+        image: golang:1.19
     build_script: make cross
@@ -181,6 +183,6 @@ success_task:
         - vendor
         - cross
     container:
-        image: golang:1.18
+        image: golang:1.19
     clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
     script: /bin/true

(file path not shown)

@@ -4,68 +4,8 @@ run:
   deadline: 5m
   skip-dirs-use-default: true
 linters:
-  enable-all: true
   disable:
-    - cyclop
-    - deadcode
-    - dogsled
-    - dupl
     - errcheck
-    - errname
-    - errorlint
-    - exhaustive
-    - exhaustivestruct
-    - exhaustruct
-    - forbidigo
-    - forcetypeassert
-    - funlen
-    - gci
-    - gochecknoglobals
-    - gochecknoinits
-    - gocognit
-    - gocritic
-    - gocyclo
-    - godot
-    - godox
-    - goerr113
-    - gofumpt
-    - golint
-    - gomnd
-    - gosec
     - gosimple
     - govet
-    - ifshort
-    - ineffassign
-    - interfacer
-    - interfacebloat
-    - ireturn
-    - lll
-    - maintidx
-    - maligned
-    - misspell
-    - musttag
-    - nakedret
-    - nestif
-    - nlreturn
-    - nolintlint
-    - nonamedreturns
-    - nosnakecase
-    - paralleltest
-    - prealloc
-    - predeclared
-    - rowserrcheck
-    - scopelint
     - staticcheck
-    - structcheck
-    - stylecheck
-    - tagliatelle
-    - testpackage
-    - thelper
-    - unconvert
-    - unparam
-    - varcheck
-    - varnamelen
-    - wastedassign
-    - whitespace
-    - wrapcheck
-    - wsl

(file path not shown)

@@ -1,13 +1,18 @@
-export GO111MODULE=off
-export GOPROXY=https://proxy.golang.org
 .PHONY: \
 	all \
+	binary \
 	clean \
+	codespell \
+	containers-storage \
+	cross \
 	default \
 	docs \
+	gccgo \
 	help \
+	install \
+	install.docs \
 	install.tools \
+	lint \
 	local-binary \
 	local-cross \
 	local-gccgo \
@@ -15,33 +20,25 @@ export GOPROXY=https://proxy.golang.org
 	local-test-integration \
 	local-test-unit \
 	local-validate \
-	lint \
-	vendor
+	test-integration \
+	test-unit \
+	validate \
+	vendor \
+	vendor-in-container
-PACKAGE := github.com/containers/storage
-GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
-EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73
 NATIVETAGS :=
 AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh)
 BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
 GO ?= go
 TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
-# Go module support: set `-mod=vendor` to use the vendored sources
-ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)
-GO:=GO111MODULE=on $(GO)
-MOD_VENDOR=-mod=vendor
-endif
 default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
 clean: ## remove all built files
 	$(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5
-sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go internal/*/*.go pkg/*/*.go pkg/*/*/*.go types/*.go)
-containers-storage: $(sources) ## build using gc on the host
-	$(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
+containers-storage: ## build using gc on the host
+	$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
 codespell:
 	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w
@@ -49,15 +46,15 @@ codespell:
 binary local-binary: containers-storage
 local-gccgo gccgo: ## build using gccgo on the host
-	GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
+	GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
 local-cross cross: ## cross build the binaries for arm, darwin, and freebsd
 	@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
 		os=`echo $${target} | cut -f1 -d/` ; \
 		arch=`echo $${target} | cut -f2 -d/` ; \
 		suffix=$${os}.$${arch} ; \
-		echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
-		env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
+		echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
+		env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
 	done
 docs: install.tools ## build the docs on the host
@@ -66,7 +63,7 @@ docs: install.tools ## build the docs on the host
 local-test: local-binary local-test-unit local-test-integration ## build the binaries and run the tests
 local-test-unit test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges)
-	@$(GO) test -count 1 $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
+	@$(GO) test -count 1 $(BUILDFLAGS) $(TESTFLAGS) ./...
 local-test-integration test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges)
 	@cd tests; ./test_runner.bash
@@ -78,9 +75,6 @@ local-validate validate: install.tools ## validate DCO and gofmt on the host
 install.tools:
 	$(MAKE) -C tests/tools
-$(FFJSON):
-	$(MAKE) -C tests/tools
 install.docs: docs
 	$(MAKE) -C docs install

(file path not shown)

@@ -523,6 +523,13 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) {
 // The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
 func (r *containerStore) save(saveLocations containerLocations) error {
 r.lockfile.AssertLockedForWriting()
+// This must be done before we write the file, because the process could be terminated
+// after the file is written but before the lock file is updated.
+lw, err := r.lockfile.RecordWrite()
+if err != nil {
+return err
+}
+r.lastWrite = lw
 for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
 location := containerLocationFromIndex(locationIndex)
 if location&saveLocations == 0 {
@@ -553,11 +560,6 @@ func (r *containerStore) save(saveLocations containerLocations) error {
 return err
 }
 }
-lw, err := r.lockfile.RecordWrite()
-if err != nil {
-return err
-}
-r.lastWrite = lw
 return nil
 }
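The reordering above (repeated for the image and layer stores later in this commit) is deliberate: RecordWrite() is now issued before the data files are written, so if the process is killed between the two steps other processes see a changed lock file and reload rather than keep trusting possibly stale in-memory state. A minimal sketch of the pattern, using a hypothetical stand-in type rather than the real lockfile API:

    package main

    import "fmt"

    // writeRecorder stands in for the lockfile used above; only RecordWrite matters here.
    type writeRecorder interface {
    	RecordWrite() (string, error)
    }

    type fakeLock struct{ n int }

    func (f *fakeLock) RecordWrite() (string, error) {
    	f.n++
    	return fmt.Sprintf("write-%d", f.n), nil
    }

    // saveWithRecord bumps the "someone wrote" marker first, then persists the data.
    // A crash after RecordWrite but before persist() only costs other readers an
    // extra reload; the reverse order could leave them trusting stale data.
    func saveWithRecord(lf writeRecorder, persist func() error) error {
    	lw, err := lf.RecordWrite()
    	if err != nil {
    		return err
    	}
    	fmt.Println("recorded write:", lw) // the real code keeps this as r.lastWrite
    	return persist()
    }

    func main() {
    	_ = saveWithRecord(&fakeLock{}, func() error {
    		fmt.Println("data files written")
    		return nil
    	})
    }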

(file path not shown)

@@ -342,12 +342,12 @@ func New(name string, config Options) (Driver, error) {
 }
 // Guess for prior driver
-driversMap := scanPriorDrivers(config.Root)
+driversMap := ScanPriorDrivers(config.Root)
 // use the supplied priority list unless it is empty
 prioList := config.DriverPriority
 if len(prioList) == 0 {
-prioList = priority
+prioList = Priority
 }
 for _, name := range prioList {
@@ -419,12 +419,12 @@ func isDriverNotSupported(err error) bool {
 }
 // scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
-func scanPriorDrivers(root string) map[string]bool {
+func ScanPriorDrivers(root string) map[string]bool {
 driversMap := make(map[string]bool)
 for driver := range drivers {
 p := filepath.Join(root, driver)
-if _, err := os.Stat(p); err == nil && driver != "vfs" {
+if _, err := os.Stat(p); err == nil {
 driversMap[driver] = true
 }
 }
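scanPriorDrivers and the per-platform priority slice are exported here so that code outside the drivers package (the rootless option handling later in this diff does exactly this) can ask which drivers already have state under a storage root. A hedged usage sketch; the import paths are the ones added later in this commit and the root path is a placeholder:

    package main

    import (
    	"fmt"

    	drivers "github.com/containers/storage/drivers"
    	_ "github.com/containers/storage/drivers/register" // populates the driver registry
    )

    func main() {
    	graphRoot := "/var/lib/containers/storage" // placeholder path
    	// ScanPriorDrivers reports which driver directories already exist under graphRoot.
    	prior := drivers.ScanPriorDrivers(graphRoot)
    	// Prefer the first entry of the platform's Priority list that has prior state.
    	for _, name := range drivers.Priority {
    		if prior[name] {
    			fmt.Println("reusing prior driver:", name)
    			return
    		}
    	}
    	fmt.Println("no prior driver state found")
    }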

(file path not shown)

@@ -2,7 +2,7 @@ package graphdriver
 var (
 // Slice of drivers that should be used in order
-priority = []string{
+Priority = []string{
 "vfs",
 }
 )

(file path not shown)

@@ -13,7 +13,7 @@ const (
 var (
 // Slice of drivers that should be used in an order
-priority = []string{
+Priority = []string{
 "zfs",
 "vfs",
 }

(file path not shown)

@@ -90,7 +90,7 @@ const (
 var (
 // Slice of drivers that should be used in an order
-priority = []string{
+Priority = []string{
 "overlay",
 // We don't support devicemapper without configuration
 // "devicemapper",

(file path not shown)

@@ -31,7 +31,7 @@ const (
 var (
 // Slice of drivers that should be used in an order
-priority = []string{
+Priority = []string{
 "zfs",
 }

(file path not shown)

@@ -5,7 +5,7 @@ package graphdriver
 var (
 // Slice of drivers that should be used in an order
-priority = []string{
+Priority = []string{
 "unsupported",
 }
 )

(file path not shown)

@@ -2,7 +2,7 @@ package graphdriver
 var (
 // Slice of drivers that should be used in order
-priority = []string{
+Priority = []string{
 "windowsfilter",
 }
 )

(file path not shown)

@@ -210,6 +210,9 @@ func doesVolatile(d string) (bool, error) {
 }
 // Mount using the mandatory options and configured options
 opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work"))
+if unshare.IsRootless() {
+opts = fmt.Sprintf("%s,userxattr", opts)
+}
 if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
 return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err)
 }
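The rootless branch added above exists because an overlay mount created without real root privileges cannot use trusted.* extended attributes; the kernel only allows such mounts when the "userxattr" option is passed, so the volatile-support probe has to request it as well. A rough, self-contained sketch of how the option string is assembled (placeholder paths, not the full doesVolatile helper):

    package main

    import "fmt"

    // probeVolatileOpts mirrors the option string built above; "rootless" stands in
    // for unshare.IsRootless().
    func probeVolatileOpts(lower, upper, work string, rootless bool) string {
    	opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
    	if rootless {
    		// Unprivileged overlay mounts must use user.* xattrs, selected via "userxattr".
    		opts += ",userxattr"
    	}
    	return opts
    }

    func main() {
    	fmt.Println(probeVolatileOpts("/tmp/l", "/tmp/u", "/tmp/w", true))
    }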

(file path not shown)

@@ -202,6 +202,8 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
 if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
 return false, fmt.Errorf("recording volatile-being-used status: %w", err)
 }
+} else {
+usingVolatile = false
 }
 }
 return usingVolatile, nil
@@ -1054,6 +1056,11 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
 if err := d.quotaCtl.SetQuota(dir, quota); err != nil {
 return err
 }
+if imageStore != "" {
+if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil {
+return err
+}
+}
 }
 perms := defaultPerms
@@ -1285,6 +1292,9 @@ func (d *Driver) Remove(id string) error {
 }
 if d.quotaCtl != nil {
 d.quotaCtl.ClearQuota(dir)
+if d.imageStore != "" {
+d.quotaCtl.ClearQuota(d.imageStore)
+}
 }
 return nil
 }
@@ -1678,16 +1688,15 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 // Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if
 // the mount data cannot fit within a page and relative links make the mount data much
 // smaller at the expense of requiring a fork exec to chdir().
-workdir = path.Join(id, "work")
 if readWrite {
 diffDir := path.Join(id, "diff")
-opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
+workDir := path.Join(id, "work")
+opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
 } else {
 opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
 }
 if len(optsList) > 0 {
-opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
+opts = strings.Join(append([]string{opts}, optsList...), ",")
 }
 mountData = label.FormatMountLabel(opts, options.MountLabel)
 mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
@@ -1697,9 +1706,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 }
 // overlay has a check in place to prevent mounting the same file system twice
-// if volatile was already specified.
-err = os.RemoveAll(filepath.Join(workdir, "work/incompat/volatile"))
-if err != nil && !os.IsNotExist(err) {
+// if volatile was already specified. Yes, the kernel repeats the "work" component.
+err = os.RemoveAll(filepath.Join(workdir, "work", "incompat", "volatile"))
+if err != nil && !errors.Is(err, os.ErrNotExist) {
 return "", err
 }
@@ -1769,11 +1778,13 @@ func (d *Driver) Put(id string) error {
 if !unmounted {
 if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
 logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
+return fmt.Errorf("unmounting %q: %w", mountpoint, err)
 }
 }
 if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
 logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err)
+return fmt.Errorf("removing mount point %q: %w", mountpoint, err)
 }
 return nil

(file path not shown)

@@ -575,14 +575,16 @@ func (r *imageStore) Save() error {
 if err != nil {
 return err
 }
-if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
-return err
-}
+// This must be done before we write the file, because the process could be terminated
+// after the file is written but before the lock file is updated.
 lw, err := r.lockfile.RecordWrite()
 if err != nil {
 return err
 }
 r.lastWrite = lw
+if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
+return err
+}
 return nil
 }

(file path not shown)

@@ -864,33 +864,35 @@ func (r *layerStore) loadMounts() error {
 return err
 }
 layerMounts := []layerMountPoint{}
-if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
-// Clear all of our mount information. If another process
-// unmounted something, it (along with its zero count) won't
-// have been encoded into the version of mountpoints.json that
-// we're loading, so our count could fall out of sync with it
-// if we don't, and if we subsequently change something else,
-// we'd pass that error along to other process that reloaded
-// the data after we saved it.
-for _, layer := range r.layers {
-layer.MountPoint = ""
-layer.MountCount = 0
+if len(data) != 0 {
+if err := json.Unmarshal(data, &layerMounts); err != nil {
+return err
 }
-// All of the non-zero count values will have been encoded, so
-// we reset the still-mounted ones based on the contents.
-for _, mount := range layerMounts {
-if mount.MountPoint != "" {
-if layer, ok := r.lookup(mount.ID); ok {
-mounts[mount.MountPoint] = layer
-layer.MountPoint = mount.MountPoint
-layer.MountCount = mount.MountCount
+}
+// Clear all of our mount information. If another process
+// unmounted something, it (along with its zero count) won't
+// have been encoded into the version of mountpoints.json that
+// we're loading, so our count could fall out of sync with it
+// if we don't, and if we subsequently change something else,
+// we'd pass that error along to other process that reloaded
+// the data after we saved it.
+for _, layer := range r.layers {
+layer.MountPoint = ""
+layer.MountCount = 0
+}
+// All of the non-zero count values will have been encoded, so
+// we reset the still-mounted ones based on the contents.
+for _, mount := range layerMounts {
+if mount.MountPoint != "" {
+if layer, ok := r.lookup(mount.ID); ok {
+mounts[mount.MountPoint] = layer
+layer.MountPoint = mount.MountPoint
+layer.MountCount = mount.MountCount
 }
 }
-err = nil
 }
 r.bymount = mounts
-return err
+return nil
 }
 // save saves the contents of the store to disk.
@@ -920,6 +922,14 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
 }
 r.lockfile.AssertLockedForWriting()
+// This must be done before we write the file, because the process could be terminated
+// after the file is written but before the lock file is updated.
+lw, err := r.lockfile.RecordWrite()
+if err != nil {
+return err
+}
+r.lastWrite = lw
 for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
 location := layerLocationFromIndex(locationIndex)
 if location&saveLocations == 0 {
@@ -949,11 +959,6 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
 }
 r.layerspathsModified[locationIndex] = opts.ModTime
 }
-lw, err := r.lockfile.RecordWrite()
-if err != nil {
-return err
-}
-r.lastWrite = lw
 return nil
 }
@@ -982,14 +987,18 @@ func (r *layerStore) saveMounts() error {
 if err != nil {
 return err
 }
-if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
-return err
-}
+// This must be done before we write the file, because the process could be terminated
+// after the file is written but before the lock file is updated.
 lw, err := r.mountsLockfile.RecordWrite()
 if err != nil {
 return err
 }
 r.mountsLastWrite = lw
+if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
+return err
+}
 return r.loadMounts()
 }

(file path not shown)

@@ -55,7 +55,7 @@ additionalimagestores = [
 # can deduplicate pulling of content, disk storage of content and can allow the
 # kernel to use less memory when running containers.
-# containers/storage supports four keys
+# containers/storage supports three keys
 # * enable_partial_images="true" | "false"
 # Tells containers/storage to look for files previously pulled in storage
 # rather then always pulling them from the container registry.
@@ -75,8 +75,8 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre
 # mappings which the kernel will allow when you later attempt to run a
 # container.
 #
-# remap-uids = 0:1668442479:65536
-# remap-gids = 0:1668442479:65536
+# remap-uids = "0:1668442479:65536"
+# remap-gids = "0:1668442479:65536"
 # Remap-User/Group is a user name which can be used to look up one or more UID/GID
 # ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
@@ -84,7 +84,8 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre
 # range that matches the specified name, and using the length of that range.
 # Additional ranges are then assigned, using the ranges which specify the
 # lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
-# until all of the entries have been used for maps.
+# until all of the entries have been used for maps. This setting overrides the
+# Remap-UIDs/GIDs setting.
 #
 # remap-user = "containers"
 # remap-group = "containers"
@@ -100,7 +101,7 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre
 # Auto-userns-min-size is the minimum size for a user namespace created automatically.
 # auto-userns-min-size=1024
 #
-# Auto-userns-max-size is the minimum size for a user namespace created automatically.
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
 # auto-userns-max-size=65536
 [storage.options.overlay]

(file path not shown)

@@ -10,6 +10,8 @@ import (
 "time"
 "github.com/BurntSushi/toml"
+drivers "github.com/containers/storage/drivers"
+_ "github.com/containers/storage/drivers/register"
 cfg "github.com/containers/storage/pkg/config"
 "github.com/containers/storage/pkg/idtools"
 "github.com/sirupsen/logrus"
@@ -310,10 +312,21 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
 }
 if opts.GraphDriverName == "" {
 if len(systemOpts.GraphDriverPriority) == 0 {
-if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) {
-opts.GraphDriverName = overlayDriver
-} else {
-opts.GraphDriverName = "vfs"
+driversMap := drivers.ScanPriorDrivers(opts.GraphRoot)
+for _, name := range drivers.Priority {
+if _, prior := driversMap[name]; prior {
+opts.GraphDriverName = name
+break
+}
+}
+if opts.GraphDriverName == "" {
+if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) {
+opts.GraphDriverName = overlayDriver
+} else {
+opts.GraphDriverName = "vfs"
+}
 }
 } else {
 opts.GraphDriverPriority = systemOpts.GraphDriverPriority
@@ -444,6 +457,16 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
 if config.Storage.Options.MountOpt != "" {
 storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
 }
+uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
+if err != nil {
+return err
+}
+gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
+if err != nil {
+return err
+}
 if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" {
 config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser
 }
@@ -456,19 +479,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
 logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
 return err
 }
-storeOptions.UIDMap = mappings.UIDs()
-storeOptions.GIDMap = mappings.GIDs()
+uidmap = mappings.UIDs()
+gidmap = mappings.GIDs()
 }
-uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
-if err != nil {
-return err
-}
-gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
-if err != nil {
-return err
-}
 storeOptions.UIDMap = uidmap
 storeOptions.GIDMap = gidmap
 storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
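The remap handling above now always runs the remap-uids/remap-gids strings through idtools.ParseIDMap and only overwrites the result when remap-user/remap-group resolve to something, which lines up with the quoting fix to the remap-uids examples earlier in this diff. A small sketch of what ParseIDMap produces for such a value (the triple is the example from the configuration comments, not a recommendation):

    package main

    import (
    	"fmt"

    	"github.com/containers/storage/pkg/idtools"
    )

    func main() {
    	// "container ID : host ID : length", written as a quoted string in the config file.
    	uidmap, err := idtools.ParseIDMap([]string{"0:1668442479:65536"}, "remap-uids")
    	if err != nil {
    		panic(err)
    	}
    	for _, m := range uidmap {
    		fmt.Printf("container %d -> host %d (length %d)\n", m.ContainerID, m.HostID, m.Size)
    	}
    }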

(file path not shown)

@@ -25,6 +25,16 @@ rootless_storage_path = "$HOME/$UID/containers/storage"
 additionalimagestores = [
 ]
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+remap-uids = "0:1000000000:30000"
+remap-gids = "0:1500000000:60000"
 [storage.options.overlay]
 # mountopt specifies comma separated list of extra mount options

(file path not shown)

@@ -37,13 +37,16 @@ func (c *command) Run(arg ...string) ([][]string, error) {
 cmd.Stderr = &stderr
 id := uuid.New().String()
-joinedArgs := strings.Join(cmd.Args, " ")
+joinedArgs := cmd.Path
+if len(cmd.Args) > 1 {
+joinedArgs = strings.Join(append([]string{cmd.Path}, cmd.Args[1:]...), " ")
+}
 logger.Log([]string{"ID:" + id, "START", joinedArgs})
 if err := cmd.Run(); err != nil {
 return nil, &Error{
 Err: err,
-Debug: strings.Join([]string{cmd.Path, joinedArgs[1:]}, " "),
+Debug: joinedArgs,
 Stderr: stderr.String(),
 }
 }
@@ -61,7 +64,7 @@ func (c *command) Run(arg ...string) ([][]string, error) {
 output := make([][]string, len(lines))
 for i, l := range lines {
-output[i] = strings.Fields(l)
+output[i] = strings.Split(l, "\t")
 }
 return output, nil
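Splitting each output line on tabs rather than arbitrary whitespace is safe because the related change below passes -H to zpool: scripted mode prints no header row and exactly one tab between columns, so a value that itself contains spaces stays in a single field. A standalone sketch of that parsing, using canned example output instead of invoking a real zpool:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Shaped like `zpool get -Hp name,health,allocated tank` output:
    	// no header, columns NAME PROPERTY VALUE SOURCE separated by single tabs.
    	raw := "tank\tname\ttank\t-\ntank\thealth\tONLINE\t-\ntank\tallocated\t1234567\t-"
    	for _, line := range strings.Split(raw, "\n") {
    		fields := strings.Split(line, "\t")
    		fmt.Println(fields)
    	}
    }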

(file path not shown)

@@ -15,5 +15,5 @@ var (
 zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"}
 zpoolPropListOptions = strings.Join(zpoolPropList, ",")
-zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
+zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions}
 )

(file path not shown)

@@ -15,5 +15,5 @@ var (
 zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"}
 zpoolPropListOptions = strings.Join(zpoolPropList, ",")
-zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
+zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions}
 )

(file path not shown)

@@ -49,9 +49,6 @@ func GetZpool(name string) (*Zpool, error) {
 return nil, err
 }
-// there is no -H
-out = out[1:]
 z := &Zpool{Name: name}
 for _, line := range out {
 if err := z.parseLine(line); err != nil {

vendor/modules.txt

@@ -215,8 +215,8 @@ github.com/containers/ocicrypt/keywrap/pkcs7
 github.com/containers/ocicrypt/spec
 github.com/containers/ocicrypt/utils
 github.com/containers/ocicrypt/utils/keyprovider
-# github.com/containers/storage v1.46.2-0.20230508110141-51c23d59f8f3
-## explicit; go 1.18
+# github.com/containers/storage v1.46.2-0.20230526114421-55ee2d19292f
+## explicit; go 1.19
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -465,7 +465,7 @@ github.com/mattn/go-shellwords
 # github.com/miekg/pkcs11 v1.1.1
 ## explicit; go 1.12
 github.com/miekg/pkcs11
-# github.com/mistifyio/go-zfs/v3 v3.0.0
+# github.com/mistifyio/go-zfs/v3 v3.0.1
 ## explicit; go 1.14
 github.com/mistifyio/go-zfs/v3
# github.com/mitchellh/mapstructure v1.5.0 # github.com/mitchellh/mapstructure v1.5.0