Bump c/storage, c/image, c/common for v1.37.0
Bump c/storage to v1.55.0, c/image to v5.32.0, and c/common to v0.60.0 in preparation for the Buildah v1.37.0 and Podman v5.2 releases. [NO NEW TESTS NEEDED] Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
This commit is contained in:
parent b4b19f40ee
commit 7de5d5d6e0

go.mod (16 lines changed)
							|  | @ -19,12 +19,12 @@ go 1.21.0 // *****  ATTENTION  WARNING  CAUTION  DANGER  ****** | |||
| 
 | ||||
| require ( | ||||
| 	github.com/containerd/containerd v1.7.18 | ||||
| 	github.com/containernetworking/cni v1.2.2 | ||||
| 	github.com/containers/common v0.59.1-0.20240712101718-237a317152ae | ||||
| 	github.com/containers/image/v5 v5.31.1-0.20240711123249-1dbd8fbbe516 | ||||
| 	github.com/containernetworking/cni v1.2.3 | ||||
| 	github.com/containers/common v0.60.0 | ||||
| 	github.com/containers/image/v5 v5.32.0 | ||||
| 	github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c | ||||
| 	github.com/containers/ocicrypt v1.2.0 | ||||
| 	github.com/containers/storage v1.54.1-0.20240712125645-98ad80d6d165 | ||||
| 	github.com/containers/storage v1.55.0 | ||||
| 	github.com/cyphar/filepath-securejoin v0.3.1 | ||||
| 	github.com/docker/distribution v2.8.3+incompatible | ||||
| 	github.com/docker/docker v27.1.1+incompatible | ||||
|  | @ -63,7 +63,7 @@ require ( | |||
| 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect | ||||
| 	github.com/BurntSushi/toml v1.4.0 // indirect | ||||
| 	github.com/Microsoft/go-winio v0.6.2 // indirect | ||||
| 	github.com/Microsoft/hcsshim v0.12.4 // indirect | ||||
| 	github.com/Microsoft/hcsshim v0.12.5 // indirect | ||||
| 	github.com/VividCortex/ewma v1.2.0 // indirect | ||||
| 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect | ||||
| 	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect | ||||
|  | @ -127,9 +127,9 @@ require ( | |||
| 	github.com/mitchellh/mapstructure v1.5.0 // indirect | ||||
| 	github.com/moby/docker-image-spec v1.3.1 // indirect | ||||
| 	github.com/moby/patternmatcher v0.6.0 // indirect | ||||
| 	github.com/moby/sys/mountinfo v0.7.1 // indirect | ||||
| 	github.com/moby/sys/mountinfo v0.7.2 // indirect | ||||
| 	github.com/moby/sys/sequential v0.5.0 // indirect | ||||
| 	github.com/moby/sys/user v0.1.0 // indirect | ||||
| 	github.com/moby/sys/user v0.2.0 // indirect | ||||
| 	github.com/moby/term v0.5.0 // indirect | ||||
| 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect | ||||
| 	github.com/modern-go/reflect2 v1.0.2 // indirect | ||||
|  | @ -150,7 +150,7 @@ require ( | |||
| 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect | ||||
| 	github.com/ulikunitz/xz v0.5.12 // indirect | ||||
| 	github.com/vbatts/tar-split v0.11.5 // indirect | ||||
| 	github.com/vbauerster/mpb/v8 v8.7.3 // indirect | ||||
| 	github.com/vbauerster/mpb/v8 v8.7.4 // indirect | ||||
| 	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect | ||||
| 	github.com/vishvananda/netns v0.0.4 // indirect | ||||
| 	go.mongodb.org/mongo-driver v1.14.0 // indirect | ||||
|  |  | |||
							
								
								
									
go.sum (41 lines changed)
|  | @ -12,8 +12,8 @@ github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0 | |||
| github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= | ||||
| github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= | ||||
| github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= | ||||
| github.com/Microsoft/hcsshim v0.12.4 h1:Ev7YUMHAHoWNm+aDSPzc5W9s6E2jyL1szpVDJeZ/Rr4= | ||||
| github.com/Microsoft/hcsshim v0.12.4/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= | ||||
| github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0= | ||||
| github.com/Microsoft/hcsshim v0.12.5/go.mod h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8= | ||||
| github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= | ||||
| github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= | ||||
| github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= | ||||
|  | @ -57,22 +57,22 @@ github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G | |||
| github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= | ||||
| github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= | ||||
| github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= | ||||
| github.com/containernetworking/cni v1.2.2 h1:9IbP6KJQQxVKo4hhnm8r50YcVKrJbJu3Dqw+Rbt1vYk= | ||||
| github.com/containernetworking/cni v1.2.2/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M= | ||||
| github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM= | ||||
| github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M= | ||||
| github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ= | ||||
| github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM= | ||||
| github.com/containers/common v0.59.1-0.20240712101718-237a317152ae h1:KtbMAKs/DMU2UOrAUd381rXld5UAJdkHZRn2BLEfmCM= | ||||
| github.com/containers/common v0.59.1-0.20240712101718-237a317152ae/go.mod h1:KrQ9y5qa7TBVzp7qs7I1MVi6Uxntu0hM5wjd5bmvMnM= | ||||
| github.com/containers/image/v5 v5.31.1-0.20240711123249-1dbd8fbbe516 h1:BVyB11XLbT7s0tMF1qzdc5R04gO2BRAdjbftRwNoLXM= | ||||
| github.com/containers/image/v5 v5.31.1-0.20240711123249-1dbd8fbbe516/go.mod h1:iAUT9Iy/z0QPrYeILorryErMUxm4GlRzBE0Yz65l/uE= | ||||
| github.com/containers/common v0.60.0 h1:QMNygqiiit9LU/yqee9Dv0N0oQ+rQq41ElfdOBQtw7w= | ||||
| github.com/containers/common v0.60.0/go.mod h1:dtKVe11xkV89tqzRX9s/B0ORjeB2dy5UB46aGjunMn8= | ||||
| github.com/containers/image/v5 v5.32.0 h1:yjbweazPfr8xOzQ2hkkYm1A2V0jN96/kES6Gwyxj7hQ= | ||||
| github.com/containers/image/v5 v5.32.0/go.mod h1:x5e0RDfGaY6bnQ13gJ2LqbfHvzssfB/y5a8HduGFxJc= | ||||
| github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= | ||||
| github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= | ||||
| github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c h1:gJDiBJYc8JFD46IJmr8SqGOcueGSRGnuhW6wgXiAjr0= | ||||
| github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c/go.mod h1:Ufusu7xAtl0LSTry0JS6dSxbxR/XJQSEqlhLqTkCaH8= | ||||
| github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM= | ||||
| github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U= | ||||
| github.com/containers/storage v1.54.1-0.20240712125645-98ad80d6d165 h1:9WbIxink8kCOoMNh9ju4CMdrrxwnbQMV1YJD8sUXt+k= | ||||
| github.com/containers/storage v1.54.1-0.20240712125645-98ad80d6d165/go.mod h1:EyuSB0B1ddqXN0pXGNKPrtxzma80jhRCeVl7/J/JAhE= | ||||
| github.com/containers/storage v1.55.0 h1:wTWZ3YpcQf1F+dSP4KxG9iqDfpQY1otaUXjPpffuhgg= | ||||
| github.com/containers/storage v1.55.0/go.mod h1:28cB81IDk+y7ok60Of6u52RbCeBRucbFOeLunhER1RQ= | ||||
| github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= | ||||
| github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= | ||||
| github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= | ||||
|  | @ -90,8 +90,8 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh | |||
| github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= | ||||
| github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= | ||||
| github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= | ||||
| github.com/docker/cli v27.0.3+incompatible h1:usGs0/BoBW8MWxGeEtqPMkzOY56jZ6kYlSN5BLDioCQ= | ||||
| github.com/docker/cli v27.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= | ||||
| github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE= | ||||
| github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= | ||||
| github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= | ||||
| github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= | ||||
| github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= | ||||
|  | @ -244,12 +244,12 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N | |||
| github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= | ||||
| github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= | ||||
| github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= | ||||
| github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= | ||||
| github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= | ||||
| github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= | ||||
| github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= | ||||
| github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= | ||||
| github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= | ||||
| github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= | ||||
| github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= | ||||
| github.com/moby/sys/user v0.2.0 h1:OnpapJsRp25vkhw8TFG6OLJODNh/3rEwRWtJ3kakwRM= | ||||
| github.com/moby/sys/user v0.2.0/go.mod h1:RYstrcWOJpVh+6qzUqp2bU3eaRpdiQeKGlKitaH0PM8= | ||||
| github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= | ||||
| github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= | ||||
| github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= | ||||
|  | @ -295,8 +295,8 @@ github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZ | |||
| github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= | ||||
| github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw= | ||||
| github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= | ||||
| github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= | ||||
| github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= | ||||
| github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= | ||||
| github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= | ||||
| github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= | ||||
| github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= | ||||
| github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= | ||||
|  | @ -347,8 +347,8 @@ github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= | |||
| github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= | ||||
| github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= | ||||
| github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= | ||||
| github.com/vbauerster/mpb/v8 v8.7.3 h1:n/mKPBav4FFWp5fH4U0lPpXfiOmCEgl5Yx/NM3tKJA0= | ||||
| github.com/vbauerster/mpb/v8 v8.7.3/go.mod h1:9nFlNpDGVoTmQ4QvNjSLtwLmAFjwmq0XaAF26toHGNM= | ||||
| github.com/vbauerster/mpb/v8 v8.7.4 h1:p4f16iMfUt3PkAC73SCzAtgtSf8TYDqEbJUT3odPrPo= | ||||
| github.com/vbauerster/mpb/v8 v8.7.4/go.mod h1:r1B5k2Ljj5KJFCekfihbiqyV4VaaRTANYmvWA2btufI= | ||||
| github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= | ||||
| github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= | ||||
| github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= | ||||
|  | @ -429,7 +429,6 @@ golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7w | |||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= | ||||
| golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||
|  |  | |||
|  | @ -0,0 +1,46 @@ | |||
| //go:build windows
 | ||||
| 
 | ||||
| package hcsshim | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 
 | ||||
| 	"github.com/Microsoft/hcsshim/internal/hns" | ||||
| ) | ||||
| 
 | ||||
| // HNSNnvManagementMacAddress represents a management MAC address
 | ||||
| // that needs to be excluded from VF reassignment
 | ||||
| type HNSNnvManagementMacAddress = hns.HNSNnvManagementMacAddress | ||||
| 
 | ||||
| // HNSNnvManagementMacList represents a list of management
 | ||||
| // mac addresses for exclusion from VF reassignment
 | ||||
| type HNSNnvManagementMacList = hns.HNSNnvManagementMacList | ||||
| 
 | ||||
| var ( | ||||
| 	ErrorEmptyMacAddressList = errors.New("management mac_address list is empty") | ||||
| ) | ||||
| 
 | ||||
| // SetNnvManagementMacAddresses sets a list of
 | ||||
| // management mac addresses in hns for exclusion from VF reassignment.
 | ||||
| func SetNnvManagementMacAddresses(managementMacAddresses []string) (*HNSNnvManagementMacList, error) { | ||||
| 	if len(managementMacAddresses) == 0 { | ||||
| 		return nil, ErrorEmptyMacAddressList | ||||
| 	} | ||||
| 	nnvManagementMacList := &HNSNnvManagementMacList{} | ||||
| 	for _, mac := range managementMacAddresses { | ||||
| 		nnvManagementMacList.MacAddressList = append(nnvManagementMacList.MacAddressList, HNSNnvManagementMacAddress{MacAddress: mac}) | ||||
| 	} | ||||
| 	return nnvManagementMacList.Set() | ||||
| } | ||||
| 
 | ||||
| // GetNnvManagementMacAddresses retrieves a list of
 | ||||
| // management mac addresses in hns for exclusion from VF reassignment.
 | ||||
| func GetNnvManagementMacAddresses() (*HNSNnvManagementMacList, error) { | ||||
| 	return hns.GetNnvManagementMacAddressList() | ||||
| } | ||||
| 
 | ||||
| // DeleteNnvManagementMacAddresses deletes the list of
 | ||||
| // management mac addresses in hns which are excluded from VF reassignment.
 | ||||
| func DeleteNnvManagementMacAddresses() (*HNSNnvManagementMacList, error) { | ||||
| 	return hns.DeleteNnvManagementMacAddressList() | ||||
| } | ||||
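The new wrapper above is a thin pass-through to the internal hns package. A usage sketch, assuming a Windows host with HNS available; the MAC address values are illustrative:

//go:build windows

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Exclude two management MAC addresses from VF reassignment.
	if _, err := hcsshim.SetNnvManagementMacAddresses([]string{"00-15-5D-00-00-01", "00-15-5D-00-00-02"}); err != nil {
		panic(err)
	}
	// Read the list back.
	list, err := hcsshim.GetNnvManagementMacAddresses()
	if err != nil {
		panic(err)
	}
	for _, mac := range list.MacAddressList {
		fmt.Println(mac.MacAddress)
	}
}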
|  | @ -0,0 +1,60 @@ | |||
| //go:build windows
 | ||||
| 
 | ||||
| package hns | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 
 | ||||
| 	"github.com/sirupsen/logrus" | ||||
| ) | ||||
| 
 | ||||
| // HNSNnvManagementMacAddress represents a management MAC address
 | ||||
| // that needs to be excluded from VF reassignment
 | ||||
| type HNSNnvManagementMacAddress struct { | ||||
| 	MacAddress string `json:",omitempty"` | ||||
| } | ||||
| 
 | ||||
| // HNSNnvManagementMacList represents a list of management
 | ||||
| // mac addresses for exclusion from VF reassignment
 | ||||
| type HNSNnvManagementMacList struct { | ||||
| 	MacAddressList []HNSNnvManagementMacAddress `json:",omitempty"` | ||||
| } | ||||
| 
 | ||||
| // HNSNnvManagementMacRequest makes an HNS call to modify/query NnvManagementMacList
 | ||||
| func HNSNnvManagementMacRequest(method, path, request string) (*HNSNnvManagementMacList, error) { | ||||
| 	nnvManagementMacList := &HNSNnvManagementMacList{} | ||||
| 	err := hnsCall(method, "/accelnet/"+path, request, &nnvManagementMacList) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return nnvManagementMacList, nil | ||||
| } | ||||
| 
 | ||||
| // Set ManagementMacAddressList by sending "POST" NnvManagementMacRequest to HNS.
 | ||||
| func (nnvManagementMacList *HNSNnvManagementMacList) Set() (*HNSNnvManagementMacList, error) { | ||||
| 	operation := "Set" | ||||
| 	title := "hcsshim::nnvManagementMacList::" + operation | ||||
| 	logrus.Debugf(title+" id=%s", nnvManagementMacList.MacAddressList) | ||||
| 
 | ||||
| 	jsonString, err := json.Marshal(nnvManagementMacList) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return HNSNnvManagementMacRequest("POST", "", string(jsonString)) | ||||
| } | ||||
| 
 | ||||
| // Get ManagementMacAddressList by sending "GET" NnvManagementMacRequest to HNS.
 | ||||
| func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { | ||||
| 	operation := "Get" | ||||
| 	title := "hcsshim::nnvManagementMacList::" + operation | ||||
| 	logrus.Debugf(title) | ||||
| 	return HNSNnvManagementMacRequest("GET", "", "") | ||||
| } | ||||
| 
 | ||||
| // Delete ManagementMacAddressList by sending "DELETE" NnvManagementMacRequest to HNS.
 | ||||
| func DeleteNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { | ||||
| 	operation := "Delete" | ||||
| 	title := "hcsshim::nnvManagementMacList::" + operation | ||||
| 	logrus.Debugf(title) | ||||
| 	return HNSNnvManagementMacRequest("DELETE", "", "") | ||||
| } | ||||
|  | @ -10,6 +10,28 @@ import ( | |||
| 	"github.com/sirupsen/logrus" | ||||
| ) | ||||
| 
 | ||||
| // EndpointState represents the states of an HNS Endpoint lifecycle.
 | ||||
| type EndpointState uint16 | ||||
| 
 | ||||
| // EndpointState const
 | ||||
| // The lifecycle of an Endpoint goes through Created, Attached, AttachedSharing (the endpoint is being shared with other containers),
 | ||||
| // Detached (after having been attached), Degraded, and finally Destroyed.
 | ||||
| // Note: This attribute is used by calico to identify stale containers and depends on the HNS v1 api; if we move to the HNS v2 api we will need
 | ||||
| // to update the current calico code and coordinate the change with calico. Reach out to Microsoft to facilitate the change via HNS.
 | ||||
| const ( | ||||
| 	Uninitialized   EndpointState = iota | ||||
| 	Created         EndpointState = 1 | ||||
| 	Attached        EndpointState = 2 | ||||
| 	AttachedSharing EndpointState = 3 | ||||
| 	Detached        EndpointState = 4 | ||||
| 	Degraded        EndpointState = 5 | ||||
| 	Destroyed       EndpointState = 6 | ||||
| ) | ||||
| 
 | ||||
| func (es EndpointState) String() string { | ||||
| 	return [...]string{"Uninitialized", "Created", "Attached", "AttachedSharing", "Detached", "Degraded", "Destroyed"}[es] | ||||
| } | ||||
| 
 | ||||
| // HNSEndpoint represents a network endpoint in HNS
 | ||||
| type HNSEndpoint struct { | ||||
| 	Id                 string            `json:"ID,omitempty"` | ||||
|  | @ -34,6 +56,7 @@ type HNSEndpoint struct { | |||
| 	Namespace          *Namespace        `json:",omitempty"` | ||||
| 	EncapOverhead      uint16            `json:",omitempty"` | ||||
| 	SharedContainers   []string          `json:",omitempty"` | ||||
| 	State              EndpointState     `json:",omitempty"` | ||||
| } | ||||
| 
 | ||||
| // SystemType represents the type of the system on which actions are done
 | ||||
|  |  | |||
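The hns package is internal to hcsshim, so here is a self-contained re-declaration of the stringer above. It shows why the name table must cover all seven states: with a six-entry table, Created through Destroyed would print shifted names and Destroyed.String() would panic with an index out of range.

package main

import "fmt"

type EndpointState uint16

const (
	Uninitialized EndpointState = iota
	Created
	Attached
	AttachedSharing
	Detached
	Degraded
	Destroyed
)

// String names every state; the table needs one entry per constant.
func (es EndpointState) String() string {
	return [...]string{"Uninitialized", "Created", "Attached", "AttachedSharing",
		"Detached", "Degraded", "Destroyed"}[es]
}

func main() {
	fmt.Println(Attached, Destroyed) // Attached Destroyed
}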
|  | @ -817,6 +817,8 @@ func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList, | |||
| 		} | ||||
| 		if args != nil { | ||||
| 			inject["cni.dev/valid-attachments"] = args.ValidAttachments | ||||
| 			// #1101: spec used incorrect variable name
 | ||||
| 			inject["cni.dev/attachments"] = args.ValidAttachments | ||||
| 		} | ||||
| 
 | ||||
| 		for _, plugin := range list.Plugins { | ||||
|  |  | |||
|  | @ -12,7 +12,6 @@ import ( | |||
| 	"strings" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/containers/common/libimage/manifests" | ||||
| 	"github.com/containers/common/libimage/platform" | ||||
| 	"github.com/containers/common/pkg/config" | ||||
| 	"github.com/containers/common/pkg/retry" | ||||
|  | @ -32,12 +31,6 @@ const ( | |||
| 	defaultRetryDelay = time.Second | ||||
| ) | ||||
| 
 | ||||
| // LookupReferenceFunc returns an image reference based on the specified one.
 | ||||
| // The returned reference can return custom ImageSource or ImageDestination
 | ||||
| // objects which intercept or filter blobs, manifests, and signatures as
 | ||||
| // they are read and written.
 | ||||
| type LookupReferenceFunc = manifests.LookupReferenceFunc | ||||
| 
 | ||||
| // CopyOptions allow for customizing image-copy operations.
 | ||||
| type CopyOptions struct { | ||||
| 	// If set, will be used for copying the image.  Fields below may
 | ||||
|  |  | |||
|  | @ -88,6 +88,8 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp | |||
| 		return tree, nil | ||||
| 	} | ||||
| 
 | ||||
| 	filterInvalidValue := `invalid image filter %q: must be in the format "filter=value or filter!=value"` | ||||
| 
 | ||||
| 	var wantedReferenceMatches, unwantedReferenceMatches []string | ||||
| 	filters := map[string][]filterFunc{} | ||||
| 	duplicate := map[string]string{} | ||||
|  | @ -101,7 +103,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp | |||
| 		} else { | ||||
| 			split = strings.SplitN(f, "=", 2) | ||||
| 			if len(split) != 2 { | ||||
| 				return nil, fmt.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value") | ||||
| 				return nil, fmt.Errorf(filterInvalidValue, f) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
|  | @ -195,7 +197,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp | |||
| 			filter = filterBefore(until) | ||||
| 
 | ||||
| 		default: | ||||
| 			return nil, fmt.Errorf("unsupported image filter %q", key) | ||||
| 			return nil, fmt.Errorf(filterInvalidValue, key) | ||||
| 		} | ||||
| 		if negate { | ||||
| 			filter = negateFilter(filter) | ||||
|  |  | |||
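A minimal, self-contained sketch of the "filter=value or filter!=value" syntax that the shared error message above describes; parseFilter is illustrative, not the library's API:

package main

import (
	"fmt"
	"strings"
)

// parseFilter splits "key=value" or "key!=value"; the latter negates the filter.
func parseFilter(f string) (key, value string, negate bool, err error) {
	if strings.Contains(f, "!=") {
		parts := strings.SplitN(f, "!=", 2)
		return parts[0], parts[1], true, nil
	}
	parts := strings.SplitN(f, "=", 2)
	if len(parts) != 2 {
		return "", "", false, fmt.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value")
	}
	return parts[0], parts[1], false, nil
}

func main() {
	fmt.Println(parseFilter("label!=maintainer")) // label maintainer true <nil>
	fmt.Println(parseFilter("dangling"))          // error: invalid image filter
}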
|  | @ -135,7 +135,7 @@ func (r *Runtime) layerTree(ctx context.Context, images []*Image) (*layerTree, e | |||
| 			// mistake. Users may not be able to recover, so we're now
 | ||||
| 			// throwing a warning to guide them to resolve the issue and
 | ||||
| 			// turn the errors non-fatal.
 | ||||
| 			logrus.Warnf("Top layer %s of image %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", topLayer, img.ID()) | ||||
| 			logrus.Warnf("Top layer %s of image %s not found in layer tree. The storage may be corrupted, consider running `podman system check`.", topLayer, img.ID()) | ||||
| 			continue | ||||
| 		} | ||||
| 		node.images = append(node.images, img) | ||||
|  | @ -234,7 +234,7 @@ func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*I | |||
| 		// mistake. Users may not be able to recover, so we're now
 | ||||
| 		// throwing a warning to guide them to resolve the issue and
 | ||||
| 		// turn the errors non-fatal.
 | ||||
| 		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", parent.TopLayer()) | ||||
| 		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system check`.", parent.TopLayer()) | ||||
| 		return children, nil | ||||
| 	} | ||||
| 
 | ||||
|  | @ -336,7 +336,7 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { | |||
| 		// mistake. Users may not be able to recover, so we're now
 | ||||
| 		// throwing a warning to guide them to resolve the issue and
 | ||||
| 		// turn the errors non-fatal.
 | ||||
| 		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", child.TopLayer()) | ||||
| 		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system check`.", child.TopLayer()) | ||||
| 		return nil, nil | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -0,0 +1,9 @@ | |||
| package libimage | ||||
| 
 | ||||
| import "github.com/containers/common/libimage/manifests" | ||||
| 
 | ||||
| // LookupReferenceFunc returns an image reference based on the specified one.
 | ||||
| // The returned reference can return custom ImageSource or ImageDestination
 | ||||
| // objects which intercept or filter blobs, manifests, and signatures as
 | ||||
| // they are read and written.
 | ||||
| type LookupReferenceFunc = manifests.LookupReferenceFunc | ||||
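The type alias keeps the public libimage name while the definition moves to the manifests package. A sketch of a conforming function, assuming manifests.LookupReferenceFunc has the func(types.ImageReference) (types.ImageReference, error) shape:

package main

import (
	"fmt"

	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

// passthroughLookup logs the reference and returns it unchanged. A real
// implementation could return a wrapping types.ImageReference whose
// ImageSource/ImageDestination intercept or filter blobs, manifests, or
// signatures as they are read and written.
func passthroughLookup(ref types.ImageReference) (types.ImageReference, error) {
	fmt.Printf("looking up %s:%s\n", ref.Transport().Name(), ref.StringWithinTransport())
	return ref, nil
}

func main() {
	ref, err := alltransports.ParseImageName("docker://quay.io/libpod/alpine:latest")
	if err != nil {
		panic(err)
	}
	if _, err := passthroughLookup(ref); err != nil {
		panic(err)
	}
}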
|  | @ -1,4 +1,4 @@ | |||
| package version | ||||
| 
 | ||||
| // Version is the version of the build.
 | ||||
| const Version = "0.60.0-dev" | ||||
| const Version = "0.60.0" | ||||
|  |  | |||
|  | @ -73,7 +73,7 @@ type bpCompressionStepData struct { | |||
| 	operation              bpcOperation                // What we are actually doing
 | ||||
| 	uploadedOperation      types.LayerCompression      // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
 | ||||
| 	uploadedAlgorithm      *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
 | ||||
| 	uploadedAnnotations    map[string]string           // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
 | ||||
| 	uploadedAnnotations    map[string]string           // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
 | ||||
| 	srcCompressorName      string                      // Compressor name to record in the blob info cache for the source blob.
 | ||||
| 	uploadedCompressorName string                      // Compressor name to record in the blob info cache for the uploaded blob.
 | ||||
| 	closers                []io.Closer                 // Objects to close after the upload is done, if any.
 | ||||
|  | @ -323,7 +323,11 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf | |||
| 			return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) | ||||
| 		} | ||||
| 	} | ||||
| 	if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression { | ||||
| 	if d.srcCompressorName == "" || d.uploadedCompressorName == "" { | ||||
| 		return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)", | ||||
| 			d.srcCompressorName, d.uploadedCompressorName) | ||||
| 	} | ||||
| 	if d.uploadedCompressorName != internalblobinfocache.UnknownCompression { | ||||
| 		if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName { | ||||
| 			// HACK: Don’t record zstd:chunked algorithms.
 | ||||
| 			// There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
 | ||||
|  | @ -337,7 +341,7 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf | |||
| 		} | ||||
| 	} | ||||
| 	if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && | ||||
| 		d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression { | ||||
| 		d.srcCompressorName != internalblobinfocache.UnknownCompression { | ||||
| 		if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName { | ||||
| 			// HACK: Don’t record zstd:chunked algorithms, see above.
 | ||||
| 			c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) | ||||
|  |  | |||
|  | @ -361,8 +361,6 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, | |||
| 				logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) | ||||
| 				continue | ||||
| 			} | ||||
| 		} | ||||
| 		if !candidate.UnknownLocation { | ||||
| 			if candidate.CompressionAlgorithm != nil { | ||||
| 				logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name()) | ||||
| 			} else { | ||||
|  |  | |||
|  | @ -27,7 +27,7 @@ type cache struct { | |||
| 	uncompressedDigests   map[digest.Digest]digest.Digest | ||||
| 	digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest]                // stores a set of digests for each uncompressed digest
 | ||||
| 	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
 | ||||
| 	compressors           map[digest.Digest]string                                 // stores a compressor name, or blobinfocache.Unknown (not blobinfocache.UnknownCompression), for each digest
 | ||||
| 	compressors           map[digest.Digest]string                                 // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
 | ||||
| } | ||||
| 
 | ||||
| // New returns a BlobInfoCache implementation which is in-memory only.
 | ||||
|  |  | |||
|  | @ -11,7 +11,7 @@ const ( | |||
| 	VersionPatch = 0 | ||||
| 
 | ||||
| 	// VersionDev indicates development branch. Releases will be empty string.
 | ||||
| 	VersionDev = "-dev" | ||||
| 	VersionDev = "" | ||||
| ) | ||||
| 
 | ||||
| // Version is the specification version that the package types support.
 | ||||
|  |  | |||
|  | @ -1 +1 @@ | |||
| 1.55.0-dev | ||||
| 1.55.0 | ||||
|  |  | |||
|  | @ -8,6 +8,7 @@ import ( | |||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io/fs" | ||||
| 	"os" | ||||
| 	"os/exec" | ||||
| 	"path/filepath" | ||||
|  | @ -56,7 +57,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com | |||
| 
 | ||||
| 	fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("failed to open output file %q: %w", destFile, err) | ||||
| 		return &fs.PathError{Op: "openat", Path: destFile, Err: err} | ||||
| 	} | ||||
| 	outFd := os.NewFile(uintptr(fd), "outFd") | ||||
| 
 | ||||
|  | @ -117,7 +118,7 @@ func hasACL(path string) (bool, error) { | |||
| 
 | ||||
| 	fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0) | ||||
| 	if err != nil { | ||||
| 		return false, err | ||||
| 		return false, &fs.PathError{Op: "openat", Path: path, Err: err} | ||||
| 	} | ||||
| 	defer unix.Close(fd) | ||||
| 	// do not worry about checking the magic number, if the file is invalid
 | ||||
|  | @ -125,7 +126,7 @@ func hasACL(path string) (bool, error) { | |||
| 	flags := make([]byte, 4) | ||||
| 	nread, err := unix.Pread(fd, flags, 8) | ||||
| 	if err != nil { | ||||
| 		return false, err | ||||
| 		return false, fmt.Errorf("pread %q: %w", path, err) | ||||
| 	} | ||||
| 	if nread != 4 { | ||||
| 		return false, fmt.Errorf("failed to read flags from %q", path) | ||||
|  | @ -150,5 +151,8 @@ func mountComposefsBlob(dataDir, mountPoint string) error { | |||
| 		mountOpts += ",noacl" | ||||
| 	} | ||||
| 
 | ||||
| 	return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts) | ||||
| 	if err := unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts); err != nil { | ||||
| 		return fmt.Errorf("failed to mount erofs image at %q: %w", mountPoint, err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  |  | |||
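Returning *fs.PathError instead of an ad-hoc fmt.Errorf keeps the conventional "op path: message" rendering while still unwrapping for classification; a small demonstration:

package main

import (
	"errors"
	"fmt"
	"io/fs"

	"golang.org/x/sys/unix"
)

func main() {
	err := &fs.PathError{Op: "openat", Path: "/no/such/file", Err: unix.ENOENT}
	fmt.Println(err)                            // openat /no/such/file: no such file or directory
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: callers can still classify the errno
}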
|  | @ -848,14 +848,14 @@ func (d *Driver) Status() [][2]string { | |||
| // Metadata returns metadata about the overlay driver such as
 | ||||
| // LowerDir, UpperDir, WorkDir and MergeDir used to store data.
 | ||||
| func (d *Driver) Metadata(id string) (map[string]string, error) { | ||||
| 	dir := d.dir(id) | ||||
| 	dir, _, inAdditionalStore := d.dir2(id, false) | ||||
| 	if err := fileutils.Exists(dir); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	metadata := map[string]string{ | ||||
| 		"WorkDir":   path.Join(dir, "work"), | ||||
| 		"MergedDir": path.Join(dir, "merged"), | ||||
| 		"MergedDir": d.getMergedDir(id, dir, inAdditionalStore), | ||||
| 		"UpperDir":  path.Join(dir, "diff"), | ||||
| 	} | ||||
| 
 | ||||
|  | @ -1703,10 +1703,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO | |||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	mergedDir := path.Join(dir, "merged") | ||||
| 	mergedDir := d.getMergedDir(id, dir, inAdditionalStore) | ||||
| 	// Attempt to create the merged dir only if it doesn't exist.
 | ||||
| 	if err := fileutils.Exists(mergedDir); err != nil && os.IsNotExist(err) { | ||||
| 		if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { | ||||
| 		if err := idtools.MkdirAllAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { | ||||
| 			return "", err | ||||
| 		} | ||||
| 	} | ||||
|  | @ -1856,8 +1856,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO | |||
| 		mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { | ||||
| 			return mountOverlayFrom(d.home, source, target, mType, flags, label) | ||||
| 		} | ||||
| 		if !inAdditionalStore { | ||||
| 			mountTarget = path.Join(id, "merged") | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// overlay has a check in place to prevent mounting the same file system twice
 | ||||
| 	// if volatile was already specified. Yes, the kernel repeats the "work" component.
 | ||||
|  | @ -1875,13 +1877,26 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO | |||
| 	return mergedDir, nil | ||||
| } | ||||
| 
 | ||||
| // getMergedDir returns the directory path that should be used as the mount point for the overlayfs.
 | ||||
| func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string { | ||||
| 	// If the layer is in an additional store, we might hold only a reading lock on it.  To prevent
 | ||||
| 	// races with other processes, use a private directory under the main store rundir.  At this point, the
 | ||||
| 	// current process is holding an exclusive lock on the store, and since the rundir cannot be shared for
 | ||||
| 	// different stores, it is safe to assume the current process has exclusive access to it.
 | ||||
| 	if inAdditionalStore { | ||||
| 		return path.Join(d.runhome, id, "merged") | ||||
| 	} | ||||
| 	return path.Join(dir, "merged") | ||||
| } | ||||
| 
 | ||||
| // Put unmounts the mount path created for the given id.
 | ||||
| func (d *Driver) Put(id string) error { | ||||
| 	dir, _, inAdditionalStore := d.dir2(id, false) | ||||
| 	if err := fileutils.Exists(dir); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	mountpoint := path.Join(dir, "merged") | ||||
| 	mountpoint := d.getMergedDir(id, dir, inAdditionalStore) | ||||
| 
 | ||||
| 	if count := d.ctr.Decrement(mountpoint); count > 0 { | ||||
| 		return nil | ||||
| 	} | ||||
|  | @ -1938,7 +1953,15 @@ func (d *Driver) Put(id string) error { | |||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if !inAdditionalStore { | ||||
| 	if inAdditionalStore { | ||||
| 		// check the base name for extra safety
 | ||||
| 		if strings.HasPrefix(mountpoint, d.runhome) && filepath.Base(mountpoint) == "merged" { | ||||
| 			err := os.RemoveAll(filepath.Dir(mountpoint)) | ||||
| 			if err != nil { | ||||
| 				logrus.Warningf("Failed to remove mountpoint %s overlay: %s: %v", id, mountpoint, err) | ||||
| 			} | ||||
| 		} | ||||
| 	} else { | ||||
| 		uid, gid := int(0), int(0) | ||||
| 		fi, err := os.Stat(mountpoint) | ||||
| 		if err != nil { | ||||
|  | @ -1955,7 +1978,7 @@ func (d *Driver) Put(id string) error { | |||
| 		// rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains
 | ||||
| 		// its atomic semantic.  In this way the "merged" directory is never removed.
 | ||||
| 		if err := unix.Rename(tmpMountpoint, mountpoint); err != nil { | ||||
| 			logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err) | ||||
| 			logrus.Debugf("Failed to replace mountpoint %s overlay: %s: %v", id, mountpoint, err) | ||||
| 			return fmt.Errorf("replacing mount point %q: %w", mountpoint, err) | ||||
| 		} | ||||
| 	} | ||||
|  | @ -2410,14 +2433,18 @@ func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent strin | |||
| 	// layers.
 | ||||
| 	diffPath, err := d.getDiffPath(id) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 		return nil, fmt.Errorf("failed to get diff path: %w", err) | ||||
| 	} | ||||
| 	layers, err := d.getLowerDiffPaths(id) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 		return nil, fmt.Errorf("failed to get lower diff path: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return archive.OverlayChanges(layers, diffPath) | ||||
| 	c, err := archive.OverlayChanges(layers, diffPath) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("computing changes: %w", err) | ||||
| 	} | ||||
| 	return c, nil | ||||
| } | ||||
| 
 | ||||
| // AdditionalImageStores returns additional image stores supported by the driver
 | ||||
|  |  | |||
|  | @ -316,7 +316,11 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) | |||
| // with respect to the parent layers
 | ||||
| func OverlayChanges(layers []string, rw string) ([]Change, error) { | ||||
| 	dc := func(root, path string, fi os.FileInfo) (string, error) { | ||||
| 		return overlayDeletedFile(layers, root, path, fi) | ||||
| 		r, err := overlayDeletedFile(layers, root, path, fi) | ||||
| 		if err != nil { | ||||
| 			return "", fmt.Errorf("overlay deleted file query: %w", err) | ||||
| 		} | ||||
| 		return r, nil | ||||
| 	} | ||||
| 	return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout) | ||||
| } | ||||
|  | @ -351,7 +355,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str | |||
| 	// If the directory isn't marked as opaque, then it's just a normal directory.
 | ||||
| 	opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName()) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 		return "", fmt.Errorf("failed querying overlay opaque xattr: %w", err) | ||||
| 	} | ||||
| 	if len(opaque) != 1 || opaque[0] != 'y' { | ||||
| 		return "", err | ||||
|  |  | |||
|  | @ -26,6 +26,19 @@ func getFilterPath(name string) string { | |||
| 	return path.(string) | ||||
| } | ||||
| 
 | ||||
| type errorRecordingReader struct { | ||||
| 	r   io.Reader | ||||
| 	err error | ||||
| } | ||||
| 
 | ||||
| func (r *errorRecordingReader) Read(p []byte) (int, error) { | ||||
| 	n, err := r.r.Read(p) | ||||
| 	if r.err == nil && err != io.EOF { | ||||
| 		r.err = err | ||||
| 	} | ||||
| 	return n, err | ||||
| } | ||||
| 
 | ||||
| // tryProcFilter tries to run the command specified in args, passing input to its stdin and returning its stdout.
 | ||||
| // cleanup() is a caller provided function that will be called when the command finishes running, regardless of
 | ||||
| // whether it succeeds or fails.
 | ||||
|  | @ -38,23 +51,21 @@ func tryProcFilter(args []string, input io.Reader, cleanup func()) (io.ReadClose | |||
| 
 | ||||
| 	var stderrBuf bytes.Buffer | ||||
| 
 | ||||
| 	inputWithError := &errorRecordingReader{r: input} | ||||
| 
 | ||||
| 	r, w := io.Pipe() | ||||
| 	cmd := exec.Command(path, args[1:]...) | ||||
| 	cmd.Stdin = input | ||||
| 	cmd.Stdin = inputWithError | ||||
| 	cmd.Stdout = w | ||||
| 	cmd.Stderr = &stderrBuf | ||||
| 	go func() { | ||||
| 		err := cmd.Run() | ||||
| 		if err != nil && stderrBuf.Len() > 0 { | ||||
| 			b := make([]byte, 1) | ||||
| 		// if there is an error reading from input, prefer to return that error
 | ||||
| 			_, errRead := input.Read(b) | ||||
| 			if errRead != nil && errRead != io.EOF { | ||||
| 				err = errRead | ||||
| 			} else { | ||||
| 		if inputWithError.err != nil { | ||||
| 			err = inputWithError.err | ||||
| 		} else if err != nil && stderrBuf.Len() > 0 { | ||||
| 			err = fmt.Errorf("%s: %w", strings.TrimRight(stderrBuf.String(), "\n"), err) | ||||
| 		} | ||||
| 		} | ||||
| 		w.CloseWithError(err) // CloseWithError(nil) == Close()
 | ||||
| 		cleanup() | ||||
| 	}() | ||||
|  |  | |||
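The old code probed for an input error after the fact by issuing one more Read on the already-consumed stream; the errorRecordingReader above records the first non-EOF error as it happens. A self-contained demonstration, with io.Copy standing in for cmd.Run:

package main

import (
	"errors"
	"fmt"
	"io"
)

// errorRecordingReader remembers the first non-EOF error seen while reading.
type errorRecordingReader struct {
	r   io.Reader
	err error
}

func (r *errorRecordingReader) Read(p []byte) (int, error) {
	n, err := r.r.Read(p)
	if r.err == nil && err != io.EOF {
		r.err = err
	}
	return n, err
}

// failingReader stands in for an input source that breaks mid-stream.
type failingReader struct{}

func (failingReader) Read([]byte) (int, error) { return 0, errors.New("disk read failed") }

func main() {
	in := &errorRecordingReader{r: failingReader{}}
	_, copyErr := io.Copy(io.Discard, in) // the consumer, like cmd.Run reading stdin
	if in.err != nil {
		fmt.Println("preferring input error:", in.err) // what tryProcFilter now reports
	} else {
		fmt.Println("consumer error:", copyErr)
	}
}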
|  | @ -287,6 +287,13 @@ func (c *layersCache) load() error { | |||
| 			newLayers = append(newLayers, l) | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if r.ReadOnly { | ||||
| 			// if the layer is coming from a read-only store, do not attempt
 | ||||
| 			// to write to it.
 | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		// the cache file is either not present or broken.  Try to generate it from the TOC.
 | ||||
| 		l, err = c.createCacheFileFromTOC(r.ID) | ||||
| 		if err != nil { | ||||
|  |  | |||
|  | @ -5,13 +5,16 @@ import ( | |||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"maps" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/containers/storage/pkg/chunked/internal" | ||||
| 	"github.com/klauspost/compress/zstd" | ||||
| 	"github.com/klauspost/pgzip" | ||||
| 	digest "github.com/opencontainers/go-digest" | ||||
| 	"github.com/vbatts/tar-split/archive/tar" | ||||
| 	expMaps "golang.org/x/exp/maps" | ||||
| ) | ||||
| 
 | ||||
| var typesToTar = map[string]byte{ | ||||
|  | @ -221,6 +224,12 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di | |||
| 		if err != nil { | ||||
| 			return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err) | ||||
| 		} | ||||
| 		// We use the TOC for creating on-disk files, but the tar-split for creating metadata
 | ||||
| 		// when exporting the layer contents. Ensure the two match, otherwise local inspection of a container
 | ||||
| 		// might be misleading about the exported contents.
 | ||||
| 		if err := ensureTOCMatchesTarSplit(toc, decodedTarSplit); err != nil { | ||||
| 			return nil, nil, nil, 0, fmt.Errorf("tar-split and TOC data is inconsistent: %w", err) | ||||
| 		} | ||||
| 	} else if tarSplitChunk.Offset > 0 { | ||||
| 		// We must ignore the tar-split when the digest is not present in the TOC, because we can’t authenticate it.
 | ||||
| 		//
 | ||||
|  | @ -234,6 +243,131 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di | |||
| 	return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err | ||||
| } | ||||
| 
 | ||||
| // ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries.
 | ||||
| func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { | ||||
| 	pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries
 | ||||
| 	for i := range toc.Entries { | ||||
| 		e := &toc.Entries[i] | ||||
| 		if e.Type != internal.TypeChunk { | ||||
| 			if _, ok := pendingFiles[e.Name]; ok { | ||||
| 				return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name) | ||||
| 			} | ||||
| 			pendingFiles[e.Name] = e | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if err := iterateTarSplit(tarSplit, func(hdr *tar.Header) error { | ||||
| 		e, ok := pendingFiles[hdr.Name] | ||||
| 		if !ok { | ||||
| 			return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name) | ||||
| 		} | ||||
| 		delete(pendingFiles, hdr.Name) | ||||
| 		expected, err := internal.NewFileMetadata(hdr) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err) | ||||
| 		} | ||||
| 		if err := ensureFileMetadataAttributesMatch(e, &expected); err != nil { | ||||
| 			return fmt.Errorf("TOC and tar-split metadata doesn’t match: %w", err) | ||||
| 		} | ||||
| 
 | ||||
| 		return nil | ||||
| 	}); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if len(pendingFiles) != 0 { | ||||
| 		remaining := expMaps.Keys(pendingFiles) | ||||
| 		if len(remaining) > 5 { | ||||
| 			remaining = remaining[:5] // Just to limit the size of the output.
 | ||||
| 		} | ||||
| 		return fmt.Errorf("TOC contains entries not present in tar-split, incl. %q", remaining) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // ensureTimePointersMatch ensures that a and b are equal
 | ||||
| func ensureTimePointersMatch(a, b *time.Time) error { | ||||
| 	// We didn’t always use “timeIfNotZero” when creating the TOC, so treat time.IsZero the same as nil.
 | ||||
| 	// The archive/tar code turns time.IsZero() timestamps into a Unix timestamp of 0 when writing, and turns a Unix timestamp of 0
 | ||||
| 	// when reading into a (local-timezone) Jan 1 1970, which is not IsZero(). So, treat that the same as IsZero as well.
 | ||||
| 	unixZero := time.Unix(0, 0) | ||||
| 	if a != nil && (a.IsZero() || a.Equal(unixZero)) { | ||||
| 		a = nil | ||||
| 	} | ||||
| 	if b != nil && (b.IsZero() || b.Equal(unixZero)) { | ||||
| 		b = nil | ||||
| 	} | ||||
| 	switch { | ||||
| 	case a == nil && b == nil: | ||||
| 		return nil | ||||
| 	case a == nil: | ||||
| 		return fmt.Errorf("nil != %v", *b) | ||||
| 	case b == nil: | ||||
| 		return fmt.Errorf("%v != nil", *a) | ||||
| 	default: | ||||
| 		if a.Equal(*b) { | ||||
| 			return nil | ||||
| 		} | ||||
| 		return fmt.Errorf("%v != %v", *a, *b) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data
 | ||||
| // in the tar stream or matching contents)
 | ||||
| func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error { | ||||
| 	// Keep this in sync with internal.FileMetadata!
 | ||||
| 
 | ||||
| 	if a.Type != b.Type { | ||||
| 		return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type) | ||||
| 	} | ||||
| 	if a.Name != b.Name { | ||||
| 		return fmt.Errorf("mismatch of Name: %q != %q", a.Name, b.Name) | ||||
| 	} | ||||
| 	if a.Linkname != b.Linkname { | ||||
| 		return fmt.Errorf("mismatch of Linkname: %q != %q", a.Linkname, b.Linkname) | ||||
| 	} | ||||
| 	if a.Mode != b.Mode { | ||||
| 		return fmt.Errorf("mismatch of Mode: %q != %q", a.Mode, b.Mode) | ||||
| 	} | ||||
| 	if a.Size != b.Size { | ||||
| 		return fmt.Errorf("mismatch of Size: %q != %q", a.Size, b.Size) | ||||
| 	} | ||||
| 	if a.UID != b.UID { | ||||
| 		return fmt.Errorf("mismatch of UID: %q != %q", a.UID, b.UID) | ||||
| 	} | ||||
| 	if a.GID != b.GID { | ||||
| 		return fmt.Errorf("mismatch of GID: %q != %q", a.GID, b.GID) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := ensureTimePointersMatch(a.ModTime, b.ModTime); err != nil { | ||||
| 		return fmt.Errorf("mismatch of ModTime: %w", err) | ||||
| 	} | ||||
| 	if err := ensureTimePointersMatch(a.AccessTime, b.AccessTime); err != nil { | ||||
| 		return fmt.Errorf("mismatch of AccessTime: %w", err) | ||||
| 	} | ||||
| 	if err := ensureTimePointersMatch(a.ChangeTime, b.ChangeTime); err != nil { | ||||
| 		return fmt.Errorf("mismatch of ChangeTime: %w", err) | ||||
| 	} | ||||
| 	if a.Devmajor != b.Devmajor { | ||||
| 		return fmt.Errorf("mismatch of Devmajor: %q != %q", a.Devmajor, b.Devmajor) | ||||
| 	} | ||||
| 	if a.Devminor != b.Devminor { | ||||
| 		return fmt.Errorf("mismatch of Devminor: %q != %q", a.Devminor, b.Devminor) | ||||
| 	} | ||||
| 	if !maps.Equal(a.Xattrs, b.Xattrs) { | ||||
| 		return fmt.Errorf("mismatch of Xattrs: %q != %q", a.Xattrs, b.Xattrs) | ||||
| 	} | ||||
| 
 | ||||
| 	// Digest is not compared
 | ||||
| 	// Offset is not compared
 | ||||
| 	// EndOffset is not compared
 | ||||
| 
 | ||||
| 	// ChunkSize is not compared
 | ||||
| 	// ChunkOffset is not compared
 | ||||
| 	// ChunkDigest is not compared
 | ||||
| 	// ChunkType is not compared
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) { | ||||
| 	d, err := digest.Parse(expectedCompressedChecksum) | ||||
| 	if err != nil { | ||||
|  |  | |||
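A small demonstration of the archive/tar timestamp quirk that ensureTimePointersMatch normalizes away: Go's zero time.Time and a Unix timestamp of 0 are different values, but a tar round trip can turn one into the other, so both are treated as nil:

package main

import (
	"fmt"
	"time"
)

func main() {
	var zero time.Time          // the "no timestamp" zero value
	unixZero := time.Unix(0, 0) // what a 0 Unix timestamp decodes to
	fmt.Println(zero.IsZero())        // true
	fmt.Println(unixZero.IsZero())    // false: Jan 1 1970 in the local timezone
	fmt.Println(zero.Equal(unixZero)) // false, hence both are normalized to nil
}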
|  | @ -7,12 +7,8 @@ package compressor | |||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"encoding/base64" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/containers/storage/pkg/archive" | ||||
| 	"github.com/containers/storage/pkg/chunked/internal" | ||||
| 	"github.com/containers/storage/pkg/ioutils" | ||||
| 	"github.com/klauspost/compress/zstd" | ||||
|  | @ -234,14 +230,6 @@ func newTarSplitData(level int) (*tarSplitData, error) { | |||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| // timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil.
 | ||||
| func timeIfNotZero(t *time.Time) *time.Time { | ||||
| 	if t == nil || t.IsZero() { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return t | ||||
| } | ||||
| 
 | ||||
| func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { | ||||
| 	// total written so far.  Used to retrieve partial offsets in the file
 | ||||
| 	dest := ioutils.NewWriteCounter(destFile) | ||||
|  | @ -380,38 +368,14 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r | |||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		typ, err := internal.GetType(hdr.Typeflag) | ||||
| 		mainEntry, err := internal.NewFileMetadata(hdr) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		xattrs := make(map[string]string) | ||||
| 		for k, v := range hdr.PAXRecords { | ||||
| 			xattrKey, ok := strings.CutPrefix(k, archive.PaxSchilyXattr) | ||||
| 			if !ok { | ||||
| 				continue | ||||
| 			} | ||||
| 			xattrs[xattrKey] = base64.StdEncoding.EncodeToString([]byte(v)) | ||||
| 		} | ||||
| 		entries := []internal.FileMetadata{ | ||||
| 			{ | ||||
| 				Type:       typ, | ||||
| 				Name:       hdr.Name, | ||||
| 				Linkname:   hdr.Linkname, | ||||
| 				Mode:       hdr.Mode, | ||||
| 				Size:       hdr.Size, | ||||
| 				UID:        hdr.Uid, | ||||
| 				GID:        hdr.Gid, | ||||
| 				ModTime:    timeIfNotZero(&hdr.ModTime), | ||||
| 				AccessTime: timeIfNotZero(&hdr.AccessTime), | ||||
| 				ChangeTime: timeIfNotZero(&hdr.ChangeTime), | ||||
| 				Devmajor:   hdr.Devmajor, | ||||
| 				Devminor:   hdr.Devminor, | ||||
| 				Xattrs:     xattrs, | ||||
| 				Digest:     checksum, | ||||
| 				Offset:     startOffset, | ||||
| 				EndOffset:  lastOffset, | ||||
| 			}, | ||||
| 		} | ||||
| 		mainEntry.Digest = checksum | ||||
| 		mainEntry.Offset = startOffset | ||||
| 		mainEntry.EndOffset = lastOffset | ||||
| 		entries := []internal.FileMetadata{mainEntry} | ||||
| 		for i := 1; i < len(chunks); i++ { | ||||
| 			entries = append(entries, internal.FileMetadata{ | ||||
| 				Type:        internal.TypeChunk, | ||||
|  |  | |||
|  | @ -4,6 +4,7 @@ package dump | |||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/base64" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"path/filepath" | ||||
|  | @ -22,12 +23,12 @@ const ( | |||
| 	ESCAPE_LONE_DASH | ||||
| ) | ||||
| 
 | ||||
| func escaped(val string, escape int) string { | ||||
| func escaped(val []byte, escape int) string { | ||||
| 	noescapeSpace := escape&NOESCAPE_SPACE != 0 | ||||
| 	escapeEqual := escape&ESCAPE_EQUAL != 0 | ||||
| 	escapeLoneDash := escape&ESCAPE_LONE_DASH != 0 | ||||
| 
 | ||||
| 	if escapeLoneDash && val == "-" { | ||||
| 	if escapeLoneDash && len(val) == 1 && val[0] == '-' { | ||||
| 		return fmt.Sprintf("\\x%.2x", val[0]) | ||||
| 	} | ||||
| 
 | ||||
|  | @ -75,8 +76,8 @@ func escaped(val string, escape int) string { | |||
| 	return result | ||||
| } | ||||
| 
 | ||||
| func escapedOptional(val string, escape int) string { | ||||
| 	if val == "" { | ||||
| func escapedOptional(val []byte, escape int) string { | ||||
| 	if len(val) == 0 { | ||||
| 		return "-" | ||||
| 	} | ||||
| 	return escaped(val, escape) | ||||
|  | @ -136,7 +137,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[ | |||
| 	} | ||||
| 	added[path] = entry | ||||
| 
 | ||||
| 	if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil { | ||||
| 	if _, err := fmt.Fprint(out, escaped([]byte(path), ESCAPE_STANDARD)); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
|  | @ -180,7 +181,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[ | |||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if _, err := fmt.Fprint(out, escapedOptional(payload, ESCAPE_LONE_DASH)); err != nil { | ||||
| 	if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
|  | @ -194,14 +195,18 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[ | |||
| 		return err | ||||
| 	} | ||||
| 	digest := verityDigests[payload] | ||||
| 	if _, err := fmt.Fprint(out, escapedOptional(digest, ESCAPE_LONE_DASH)); err != nil { | ||||
| 	if _, err := fmt.Fprint(out, escapedOptional([]byte(digest), ESCAPE_LONE_DASH)); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	for k, v := range entry.Xattrs { | ||||
| 		name := escaped(k, ESCAPE_EQUAL) | ||||
| 		value := escaped(v, ESCAPE_EQUAL) | ||||
| 	for k, vEncoded := range entry.Xattrs { | ||||
| 		v, err := base64.StdEncoding.DecodeString(vEncoded) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("decode xattr %q: %w", k, err) | ||||
| 		} | ||||
| 		name := escaped([]byte(k), ESCAPE_EQUAL) | ||||
| 
 | ||||
| 		value := escaped(v, ESCAPE_EQUAL) | ||||
| 		if _, err := fmt.Fprintf(out, " %s=%s", name, value); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  |  | |||
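The switch from string to []byte matters because xattr values are stored base64-encoded in the TOC (see NewFileMetadata in the internal package below) and decode to arbitrary bytes that need not be valid UTF-8. A minimal round-trip sketch:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// An xattr value containing non-UTF-8 bytes, as stored in and read from the TOC.
	stored := base64.StdEncoding.EncodeToString([]byte{0x00, 0xff, 'y'})
	raw, err := base64.StdEncoding.DecodeString(stored)
	if err != nil {
		panic(err)
	}
	fmt.Printf("stored=%q decoded=%q\n", stored, raw)
}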
|  | @ -5,16 +5,19 @@ package internal | |||
| // larger software like the graph drivers.
 | ||||
| 
 | ||||
| import ( | ||||
| 	"archive/tar" | ||||
| 	"bytes" | ||||
| 	"encoding/base64" | ||||
| 	"encoding/binary" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/containers/storage/pkg/archive" | ||||
| 	jsoniter "github.com/json-iterator/go" | ||||
| 	"github.com/klauspost/compress/zstd" | ||||
| 	"github.com/opencontainers/go-digest" | ||||
| 	"github.com/vbatts/tar-split/archive/tar" | ||||
| ) | ||||
| 
 | ||||
| // TOC is short for Table of Contents and is used by the zstd:chunked
 | ||||
|  | @ -36,10 +39,22 @@ type TOC struct { | |||
| // that duplicates what can found in the tar header (and should match), but
 | ||||
| // also special/custom content (see below).
 | ||||
| //
 | ||||
| // Regular files may optionally be represented as a sequence of “chunks”,
 | ||||
| // which may be ChunkTypeData or ChunkTypeZeros (and ChunkTypeData boundaries
 | ||||
| // are heuristically determined to increase chance of chunk matching / reuse
 | ||||
| // similar to rsync). In that case, the regular file is represented
 | ||||
| // as an initial TypeReg entry (with all metadata for the file as a whole)
 | ||||
| // immediately followed by zero or more TypeChunk entries (containing only Type,
 | ||||
| // Name and Chunk* fields); if there is at least one TypeChunk entry, the Chunk*
 | ||||
| // fields are relevant in all of these entries, including the initial
 | ||||
| // TypeReg one.
 | ||||
| //
 | ||||
| // Note that the metadata here, when fetched by a zstd:chunked aware client,
 | ||||
| // is used instead of that in the tar stream.  The contents of the tar stream
 | ||||
| // are not used in this scenario.
 | ||||
| type FileMetadata struct { | ||||
| 	// If you add any fields, update ensureFileMetadataMatches as well!
 | ||||
| 
 | ||||
| 	// The metadata below largely duplicates that in the tar headers.
 | ||||
| 	Type       string            `json:"type"` | ||||
| 	Name       string            `json:"name"` | ||||
|  | @ -267,3 +282,43 @@ func footerDataToBlob(footer ZstdChunkedFooterData) []byte { | |||
| 
 | ||||
| 	return manifestDataLE | ||||
| } | ||||
| 
 | ||||
| // timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil.
 | ||||
| func timeIfNotZero(t *time.Time) *time.Time { | ||||
| 	if t == nil || t.IsZero() { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return t | ||||
| } | ||||
| 
 | ||||
| // NewFileMetadata creates a basic FileMetadata entry for hdr.
 | ||||
| // The caller must set DigestOffset/EndOffset and the Chunk* values separately.
 | ||||
| func NewFileMetadata(hdr *tar.Header) (FileMetadata, error) { | ||||
| 	typ, err := GetType(hdr.Typeflag) | ||||
| 	if err != nil { | ||||
| 		return FileMetadata{}, err | ||||
| 	} | ||||
| 	xattrs := make(map[string]string) | ||||
| 	for k, v := range hdr.PAXRecords { | ||||
| 		xattrKey, ok := strings.CutPrefix(k, archive.PaxSchilyXattr) | ||||
| 		if !ok { | ||||
| 			continue | ||||
| 		} | ||||
| 		xattrs[xattrKey] = base64.StdEncoding.EncodeToString([]byte(v)) | ||||
| 	} | ||||
| 	return FileMetadata{ | ||||
| 		Type:       typ, | ||||
| 		Name:       hdr.Name, | ||||
| 		Linkname:   hdr.Linkname, | ||||
| 		Mode:       hdr.Mode, | ||||
| 		Size:       hdr.Size, | ||||
| 		UID:        hdr.Uid, | ||||
| 		GID:        hdr.Gid, | ||||
| 		ModTime:    timeIfNotZero(&hdr.ModTime), | ||||
| 		AccessTime: timeIfNotZero(&hdr.AccessTime), | ||||
| 		ChangeTime: timeIfNotZero(&hdr.ChangeTime), | ||||
| 		Devmajor:   hdr.Devmajor, | ||||
| 		Devminor:   hdr.Devminor, | ||||
| 		Xattrs:     xattrs, | ||||
| 	}, nil | ||||
| } | ||||
|  |  | |||
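To make the chunking comment above concrete: a rough sketch of the TOC entries for one regular file split into a data chunk and a zeros chunk. The file name, sizes, and offsets are invented, and the exact Chunk* field names are assumptions based on this package's FileMetadata:

	// An initial TypeReg entry carries the whole-file metadata; because a
	// TypeChunk entry follows, the Chunk* fields are relevant in both entries.
	entries := []internal.FileMetadata{
		{
			Type: internal.TypeReg, Name: "usr/lib/big.bin", Size: 5 << 20,
			ChunkType: internal.ChunkTypeData, ChunkOffset: 0, ChunkSize: 4 << 20,
		},
		{
			Type: internal.TypeChunk, Name: "usr/lib/big.bin",
			ChunkType: internal.ChunkTypeZeros, ChunkOffset: 4 << 20, ChunkSize: 1 << 20,
		},
	}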
							
								
								
									
68 vendor/github.com/containers/storage/pkg/chunked/tar_split_linux.go (generated, vendored, new file)
							|  | @ -0,0 +1,68 @@ | |||
| package chunked | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 
 | ||||
| 	"github.com/vbatts/tar-split/archive/tar" | ||||
| 	"github.com/vbatts/tar-split/tar/storage" | ||||
| ) | ||||
| 
 | ||||
| // iterateTarSplit calls handler for each tar header in tarSplit
 | ||||
| func iterateTarSplit(tarSplit []byte, handler func(hdr *tar.Header) error) error { | ||||
| 	// This, strictly speaking, hard-codes undocumented assumptions about how github.com/vbatts/tar-split/tar/asm.NewInputTarStream
 | ||||
| 	// forms the tar-split contents. Pragmatically, NewInputTarStream should always produce storage.FileType entries at least
 | ||||
| 	// for every non-empty file, which basically constrains it to the output we expect.
 | ||||
| 	//
 | ||||
| 	// Specifically, we assume:
 | ||||
| 	// - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions
 | ||||
| 	// - (There is a FileType entry for every tar header, we ignore it)
 | ||||
| 	// - Trailing padding of a file, if any, is included in the next SegmentType entry
 | ||||
| 	// - At the end, there may be SegmentType entries just for the terminating zero blocks.
 | ||||
| 
 | ||||
| 	unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit)) | ||||
| 	for { | ||||
| 		tsEntry, err := unpacker.Next() | ||||
| 		if err != nil { | ||||
| 			if err == io.EOF { | ||||
| 				return nil | ||||
| 			} | ||||
| 			return fmt.Errorf("reading tar-split entries: %w", err) | ||||
| 		} | ||||
| 		switch tsEntry.Type { | ||||
| 		case storage.SegmentType: | ||||
| 			payload := tsEntry.Payload | ||||
| 			// This is horrible, but we don’t know how much padding to skip. (It can be computed from the previous hdr.Size for non-sparse
 | ||||
| 			// files, but for sparse files that is set to the logical size.)
 | ||||
| 			//
 | ||||
| 			// First, assume that all padding is zero bytes.
 | ||||
| 			// A tar header starts with a file name, which might in principle be empty, but
 | ||||
| 			// at least https://github.com/opencontainers/image-spec/blob/main/layer.md#populate-initial-filesystem suggests that
 | ||||
| 			// the tar name should never be empty (it should be ".", or maybe "./").
 | ||||
| 			//
 | ||||
| 			// This will cause us to skip all zero bytes in the trailing blocks, but that’s fine.
 | ||||
| 			i := 0 | ||||
| 			for i < len(payload) && payload[i] == 0 { | ||||
| 				i++ | ||||
| 			} | ||||
| 			payload = payload[i:] | ||||
| 			tr := tar.NewReader(bytes.NewReader(payload)) | ||||
| 			hdr, err := tr.Next() | ||||
| 			if err != nil { | ||||
| 				if err == io.EOF { // Probably the last entry, but let’s let the unpacker drive that.
 | ||||
| 					break | ||||
| 				} | ||||
| 				return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err) | ||||
| 			} | ||||
| 			if err := handler(hdr); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 
 | ||||
| 		case storage.FileType: | ||||
| 			// Nothing
 | ||||
| 		default: | ||||
| 			return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
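A minimal usage sketch for the new helper, assuming tarSplitBlob holds tar-split data as produced by tar-split's asm.NewInputTarStream (the wrapper function is hypothetical):

	// tarSplitNames collects the file names recorded in a tar-split blob.
	func tarSplitNames(tarSplitBlob []byte) ([]string, error) {
		var names []string
		if err := iterateTarSplit(tarSplitBlob, func(hdr *tar.Header) error {
			names = append(names, hdr.Name)
			return nil
		}); err != nil {
			return nil, fmt.Errorf("listing tar-split headers: %w", err)
		}
		return names, nil
	}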
|  | @ -6,10 +6,12 @@ package loopback | |||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io/fs" | ||||
| 	"os" | ||||
| 	"syscall" | ||||
| 
 | ||||
| 	"github.com/sirupsen/logrus" | ||||
| 	"golang.org/x/sys/unix" | ||||
| ) | ||||
| 
 | ||||
| // Loopback related errors
 | ||||
|  | @ -39,7 +41,7 @@ func getNextFreeLoopbackIndex() (int, error) { | |||
| 	return index, err | ||||
| } | ||||
| 
 | ||||
| func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { | ||||
| func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { | ||||
| 	// Read information about the loopback file.
 | ||||
| 	var st syscall.Stat_t | ||||
| 	err = syscall.Fstat(int(sparseFile.Fd()), &st) | ||||
|  | @ -48,31 +50,51 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File | |||
| 		return nil, ErrAttachLoopbackDevice | ||||
| 	} | ||||
| 
 | ||||
| 	// upper bound to avoid infinite loop
 | ||||
| 	remaining := 1000 | ||||
| 
 | ||||
| 	// Start looking for a free /dev/loop
 | ||||
| 	for { | ||||
| 		target := fmt.Sprintf("/dev/loop%d", index) | ||||
| 		index++ | ||||
| 
 | ||||
| 		fi, err := os.Stat(target) | ||||
| 		if err != nil { | ||||
| 			if os.IsNotExist(err) { | ||||
| 				logrus.Error("There are no more loopback devices available.") | ||||
| 			} | ||||
| 		if remaining == 0 { | ||||
| 			logrus.Errorf("No free loopback devices available") | ||||
| 			return nil, ErrAttachLoopbackDevice | ||||
| 		} | ||||
| 		remaining-- | ||||
| 
 | ||||
| 		if fi.Mode()&os.ModeDevice != os.ModeDevice { | ||||
| 			logrus.Errorf("Loopback device %s is not a block device.", target) | ||||
| 			continue | ||||
| 		index, err := getNextFreeLoopbackIndex() | ||||
| 		if err != nil { | ||||
| 			logrus.Debugf("Error retrieving the next available loopback: %s", err) | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		target := fmt.Sprintf("/dev/loop%d", index) | ||||
| 
 | ||||
| 		// OpenFile adds O_CLOEXEC
 | ||||
| 		loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644) | ||||
| 		if err != nil { | ||||
| 			// The kernel returns ENXIO when opening a device that is in the "deleting" or "rundown" state, so
 | ||||
| 			// just treat ENXIO as if the device does not exist.
 | ||||
| 			if errors.Is(err, fs.ErrNotExist) || errors.Is(err, unix.ENXIO) { | ||||
| 				// Another process could have taken the loopback device in the meantime.  So repeat
 | ||||
| 				// the process with the next loopback device.
 | ||||
| 				continue | ||||
| 			} | ||||
| 			logrus.Errorf("Opening loopback device: %s", err) | ||||
| 			return nil, ErrAttachLoopbackDevice | ||||
| 		} | ||||
| 
 | ||||
| 		fi, err := loopFile.Stat() | ||||
| 		if err != nil { | ||||
| 			loopFile.Close() | ||||
| 			logrus.Errorf("Stat loopback device: %s", err) | ||||
| 			return nil, ErrAttachLoopbackDevice | ||||
| 		} | ||||
| 		if fi.Mode()&os.ModeDevice != os.ModeDevice { | ||||
| 			loopFile.Close() | ||||
| 			logrus.Errorf("Loopback device %s is not a block device.", target) | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		// Try to attach to the loop file
 | ||||
| 		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { | ||||
| 			loopFile.Close() | ||||
|  | @ -124,14 +146,6 @@ func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) { | |||
| } | ||||
| 
 | ||||
| func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) { | ||||
| 	// Try to retrieve the next available loopback device via syscall.
 | ||||
| 	// If it fails, we discard error and start looping for a
 | ||||
| 	// loopback from index 0.
 | ||||
| 	startIndex, err := getNextFreeLoopbackIndex() | ||||
| 	if err != nil { | ||||
| 		logrus.Debugf("Error retrieving the next available loopback: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	var sparseFile *os.File | ||||
| 
 | ||||
| 	// OpenFile adds O_CLOEXEC
 | ||||
|  | @ -146,7 +160,7 @@ func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err erro | |||
| 	} | ||||
| 	defer sparseFile.Close() | ||||
| 
 | ||||
| 	loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile) | ||||
| 	loopFile, err := openNextAvailableLoopback(sparseName, sparseFile) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  |  | |||
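The reason the index is now re-queried on every iteration: the kernel hands out an index that is free at that instant, but another process can claim /dev/loopN before LOOP_SET_FD succeeds, so a stale index must not be reused. A rough sketch of such a query via golang.org/x/sys/unix, which approximates (and does not necessarily match) what getNextFreeLoopbackIndex does; the function name is invented:

	// freeLoopIndex asks loop-control for a currently free device index.
	func freeLoopIndex() (int, error) {
		f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0o644)
		if err != nil {
			return -1, err
		}
		defer f.Close()
		// The answer can be stale by the time we open and bind the device,
		// hence the bounded retry loop above.
		return unix.IoctlRetInt(int(f.Fd()), unix.LOOP_CTL_GET_FREE)
	}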
|  | @ -2846,7 +2846,7 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) { | |||
| 		exists := store.Exists(id) | ||||
| 		store.stopReading() | ||||
| 		if exists { | ||||
| 			return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrLayerUnknown) | ||||
| 			return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
|  | @ -2930,15 +2930,41 @@ func (s *store) Unmount(id string, force bool) (bool, error) { | |||
| } | ||||
| 
 | ||||
| func (s *store) Changes(from, to string) ([]archive.Change, error) { | ||||
| 	if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]archive.Change, bool, error) { | ||||
| 	// NaiveDiff could cause mounts to happen without a lock, so be safe
 | ||||
| 	// and treat the .Diff operation as a Mount.
 | ||||
| 	// We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver
 | ||||
| 	// in startUsingGraphDriver().
 | ||||
| 	if err := s.startUsingGraphDriver(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer s.stopUsingGraphDriver() | ||||
| 
 | ||||
| 	rlstore, lstores, err := s.bothLayerStoreKindsLocked() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if err := rlstore.startWriting(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if rlstore.Exists(to) { | ||||
| 		res, err := rlstore.Changes(from, to) | ||||
| 		rlstore.stopWriting() | ||||
| 		return res, err | ||||
| 	} | ||||
| 	rlstore.stopWriting() | ||||
| 
 | ||||
| 	for _, s := range lstores { | ||||
| 		store := s | ||||
| 		if err := store.startReading(); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		if store.Exists(to) { | ||||
| 			res, err := store.Changes(from, to) | ||||
| 			return res, true, err | ||||
| 		} | ||||
| 		return nil, false, nil | ||||
| 	}); done { | ||||
| 			store.stopReading() | ||||
| 			return res, err | ||||
| 		} | ||||
| 		store.stopReading() | ||||
| 	} | ||||
| 	return nil, ErrLayerUnknown | ||||
| } | ||||
| 
 | ||||
|  | @ -2968,12 +2994,30 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro | |||
| 	} | ||||
| 	defer s.stopUsingGraphDriver() | ||||
| 
 | ||||
| 	layerStores, err := s.allLayerStoresLocked() | ||||
| 	rlstore, lstores, err := s.bothLayerStoreKindsLocked() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	for _, s := range layerStores { | ||||
| 	if err := rlstore.startWriting(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if rlstore.Exists(to) { | ||||
| 		rc, err := rlstore.Diff(from, to, options) | ||||
| 		if rc != nil && err == nil { | ||||
| 			wrapped := ioutils.NewReadCloserWrapper(rc, func() error { | ||||
| 				err := rc.Close() | ||||
| 				rlstore.stopWriting() | ||||
| 				return err | ||||
| 			}) | ||||
| 			return wrapped, nil | ||||
| 		} | ||||
| 		rlstore.stopWriting() | ||||
| 		return rc, err | ||||
| 	} | ||||
| 	rlstore.stopWriting() | ||||
| 
 | ||||
| 	for _, s := range lstores { | ||||
| 		store := s | ||||
| 		if err := store.startReading(); err != nil { | ||||
| 			return nil, err | ||||
|  |  | |||
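The Diff hunk above ties releasing the layer store's write lock to the caller closing the returned stream, via ioutils.NewReadCloserWrapper from containers/storage/pkg/ioutils. A minimal sketch of that pattern, with a plain mutex standing in for the store lock (names and payload are illustrative):

	// lockedStream serves a payload while holding mu until the reader is closed.
	func lockedStream(mu *sync.Mutex) io.ReadCloser {
		mu.Lock()
		rc := io.NopCloser(strings.NewReader("diff payload"))
		return ioutils.NewReadCloserWrapper(rc, func() error {
			err := rc.Close()
			mu.Unlock() // the lock is held until the consumer finishes reading
			return err
		})
	}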
|  | @ -51,7 +51,7 @@ func mountedByOpenat2(path string) (bool, error) { | |||
| 		Resolve: unix.RESOLVE_NO_XDEV, | ||||
| 	}) | ||||
| 	_ = unix.Close(dirfd) | ||||
| 	switch err { //nolint:errorlint // unix errors are bare
 | ||||
| 	switch err { | ||||
| 	case nil: // definitely not a mount
 | ||||
| 		_ = unix.Close(fd) | ||||
| 		return false, nil | ||||
|  |  | |||
|  | @ -197,7 +197,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { | |||
| 		for { | ||||
| 			var line []byte | ||||
| 			line, isPrefix, err = rd.ReadLine() | ||||
| 
 | ||||
| 			if err != nil { | ||||
| 				// We should return no error if EOF is reached
 | ||||
| 				// without a match.
 | ||||
|  |  | |||
|  | @ -233,7 +233,7 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { | |||
| 	var tip component | ||||
| 	var refilling, filling, padding []byte | ||||
| 	var fillCount int | ||||
| 	curWidth := int(internal.PercentageRound(stat.Total, stat.Current, uint(width))) | ||||
| 	curWidth := int(internal.PercentageRound(stat.Total, stat.Current, int64(width))) | ||||
| 
 | ||||
| 	if curWidth != 0 { | ||||
| 		if !stat.Completed || s.tipOnComplete { | ||||
|  | @ -241,20 +241,19 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { | |||
| 			s.tip.count++ | ||||
| 			fillCount += tip.width | ||||
| 		} | ||||
| 		if stat.Refill != 0 { | ||||
| 			refWidth := int(internal.PercentageRound(stat.Total, stat.Refill, uint(width))) | ||||
| 		switch refWidth := 0; { | ||||
| 		case stat.Refill != 0: | ||||
| 			refWidth = int(internal.PercentageRound(stat.Total, stat.Refill, int64(width))) | ||||
| 			curWidth -= refWidth | ||||
| 			refWidth += curWidth | ||||
| 			fallthrough | ||||
| 		default: | ||||
| 			for w := s.components[iFiller].width; curWidth-fillCount >= w; fillCount += w { | ||||
| 				filling = append(filling, s.components[iFiller].bytes...) | ||||
| 			} | ||||
| 			for w := s.components[iRefiller].width; refWidth-fillCount >= w; fillCount += w { | ||||
| 				refilling = append(refilling, s.components[iRefiller].bytes...) | ||||
| 			} | ||||
| 		} else { | ||||
| 			for w := s.components[iFiller].width; curWidth-fillCount >= w; fillCount += w { | ||||
| 				filling = append(filling, s.components[iFiller].bytes...) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
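The bar-filler hunk above folds a duplicated if/else into a switch that falls through to default, so the filler loop is written only once. A tiny runnable sketch of that Go shape:

	package main

	import "fmt"

	func main() {
		refill := true
		switch refWidth := 0; {
		case refill:
			refWidth = 10 // computed only in the refill case
			fallthrough
		default:
			// The shared tail runs either way: 10 here, 0 if refill were false.
			fmt.Println(refWidth)
		}
	}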
|  | @ -81,7 +81,8 @@ func (d *movingAverageETA) Decor(s Statistics) (string, int) { | |||
| func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) { | ||||
| 	if n <= 0 { | ||||
| 		d.zDur += dur | ||||
| 	} else { | ||||
| 		return | ||||
| 	} | ||||
| 	durPerItem := float64(d.zDur+dur) / float64(n) | ||||
| 	if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) { | ||||
| 		d.zDur += dur | ||||
|  | @ -89,7 +90,6 @@ func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) { | |||
| 	} | ||||
| 	d.zDur = 0 | ||||
| 	d.average.Add(durPerItem) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // AverageETA decorator. It's wrapper of NewAverageETA.
 | ||||
|  |  | |||
|  | @ -61,7 +61,7 @@ func NewPercentage(format string, wcc ...WC) Decorator { | |||
| 		format = "% d" | ||||
| 	} | ||||
| 	f := func(s Statistics) string { | ||||
| 		p := internal.Percentage(s.Total, s.Current, 100) | ||||
| 		p := internal.PercentageRound(s.Total, s.Current, 100) | ||||
| 		return fmt.Sprintf(format, percentageType(p)) | ||||
| 	} | ||||
| 	return Any(f, wcc...) | ||||
|  |  | |||
|  | @ -96,7 +96,8 @@ func (d *movingAverageSpeed) Decor(_ Statistics) (string, int) { | |||
| func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) { | ||||
| 	if n <= 0 { | ||||
| 		d.zDur += dur | ||||
| 	} else { | ||||
| 		return | ||||
| 	} | ||||
| 	durPerByte := float64(d.zDur+dur) / float64(n) | ||||
| 	if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) { | ||||
| 		d.zDur += dur | ||||
|  | @ -104,7 +105,6 @@ func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) { | |||
| 	} | ||||
| 	d.zDur = 0 | ||||
| 	d.average.Add(durPerByte) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // AverageSpeed decorator with dynamic unit measure adjustment. It's
 | ||||
|  |  | |||
|  | @ -3,17 +3,20 @@ package internal | |||
| import "math" | ||||
| 
 | ||||
| // Percentage is a helper function, to calculate percentage.
 | ||||
| func Percentage(total, current int64, width uint) float64 { | ||||
| 	if total <= 0 { | ||||
| func Percentage(total, current, width uint) float64 { | ||||
| 	if total == 0 { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	if current >= total { | ||||
| 		return float64(width) | ||||
| 	} | ||||
| 	return float64(int64(width)*current) / float64(total) | ||||
| 	return float64(width*current) / float64(total) | ||||
| } | ||||
| 
 | ||||
| // PercentageRound same as Percentage but with math.Round.
 | ||||
| func PercentageRound(total, current int64, width uint) float64 { | ||||
| 	return math.Round(Percentage(total, current, width)) | ||||
| func PercentageRound(total, current, width int64) float64 { | ||||
| 	if total < 0 || current < 0 || width < 0 { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	return math.Round(Percentage(uint(total), uint(current), uint(width))) | ||||
| } | ||||
|  |  | |||
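A quick check of the reworked helpers with illustrative values; the width is scaled by current/total, and negative inputs now short-circuit to zero:

	fmt.Println(internal.PercentageRound(100, 33, 80))  // 26: round(80*33/100) = round(26.4)
	fmt.Println(internal.PercentageRound(100, 33, 100)) // 33: the percentage decorator's width-100 case
	fmt.Println(internal.PercentageRound(-1, 33, 80))   // 0: negative inputs are rejected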
|  | @ -18,7 +18,7 @@ github.com/Microsoft/go-winio/internal/socket | |||
| github.com/Microsoft/go-winio/internal/stringbuffer | ||||
| github.com/Microsoft/go-winio/pkg/guid | ||||
| github.com/Microsoft/go-winio/vhd | ||||
| # github.com/Microsoft/hcsshim v0.12.4 | ||||
| # github.com/Microsoft/hcsshim v0.12.5 | ||||
| ## explicit; go 1.21 | ||||
| github.com/Microsoft/hcsshim | ||||
| github.com/Microsoft/hcsshim/computestorage | ||||
|  | @ -92,7 +92,7 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil | |||
| # github.com/containerd/typeurl/v2 v2.1.1 | ||||
| ## explicit; go 1.13 | ||||
| github.com/containerd/typeurl/v2 | ||||
| # github.com/containernetworking/cni v1.2.2 | ||||
| # github.com/containernetworking/cni v1.2.3 | ||||
| ## explicit; go 1.21 | ||||
| github.com/containernetworking/cni/libcni | ||||
| github.com/containernetworking/cni/pkg/invoke | ||||
|  | @ -107,7 +107,7 @@ github.com/containernetworking/cni/pkg/version | |||
| # github.com/containernetworking/plugins v1.5.1 | ||||
| ## explicit; go 1.20 | ||||
| github.com/containernetworking/plugins/pkg/ns | ||||
| # github.com/containers/common v0.59.1-0.20240712101718-237a317152ae | ||||
| # github.com/containers/common v0.60.0 | ||||
| ## explicit; go 1.21.0 | ||||
| github.com/containers/common/internal | ||||
| github.com/containers/common/internal/attributedstring | ||||
|  | @ -160,7 +160,7 @@ github.com/containers/common/pkg/umask | |||
| github.com/containers/common/pkg/util | ||||
| github.com/containers/common/pkg/version | ||||
| github.com/containers/common/version | ||||
| # github.com/containers/image/v5 v5.31.1-0.20240711123249-1dbd8fbbe516 | ||||
| # github.com/containers/image/v5 v5.32.0 | ||||
| ## explicit; go 1.21.0 | ||||
| github.com/containers/image/v5/copy | ||||
| github.com/containers/image/v5/directory | ||||
|  | @ -252,7 +252,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7 | |||
| github.com/containers/ocicrypt/spec | ||||
| github.com/containers/ocicrypt/utils | ||||
| github.com/containers/ocicrypt/utils/keyprovider | ||||
| # github.com/containers/storage v1.54.1-0.20240712125645-98ad80d6d165 | ||||
| # github.com/containers/storage v1.55.0 | ||||
| ## explicit; go 1.21 | ||||
| github.com/containers/storage | ||||
| github.com/containers/storage/drivers | ||||
|  | @ -541,14 +541,14 @@ github.com/moby/docker-image-spec/specs-go/v1 | |||
| # github.com/moby/patternmatcher v0.6.0 | ||||
| ## explicit; go 1.19 | ||||
| github.com/moby/patternmatcher | ||||
| # github.com/moby/sys/mountinfo v0.7.1 | ||||
| ## explicit; go 1.16 | ||||
| # github.com/moby/sys/mountinfo v0.7.2 | ||||
| ## explicit; go 1.17 | ||||
| github.com/moby/sys/mountinfo | ||||
| # github.com/moby/sys/sequential v0.5.0 | ||||
| ## explicit; go 1.17 | ||||
| github.com/moby/sys/sequential | ||||
| # github.com/moby/sys/user v0.1.0 | ||||
| ## explicit; go 1.17 | ||||
| # github.com/moby/sys/user v0.2.0 | ||||
| ## explicit; go 1.21 | ||||
| github.com/moby/sys/user | ||||
| # github.com/moby/term v0.5.0 | ||||
| ## explicit; go 1.18 | ||||
|  | @ -719,7 +719,7 @@ github.com/ulikunitz/xz/lzma | |||
| github.com/vbatts/tar-split/archive/tar | ||||
| github.com/vbatts/tar-split/tar/asm | ||||
| github.com/vbatts/tar-split/tar/storage | ||||
| # github.com/vbauerster/mpb/v8 v8.7.3 | ||||
| # github.com/vbauerster/mpb/v8 v8.7.4 | ||||
| ## explicit; go 1.17 | ||||
| github.com/vbauerster/mpb/v8 | ||||
| github.com/vbauerster/mpb/v8/cwriter | ||||
|  |  | |||