fix(deps): update module github.com/containers/image/v5 to v5.31.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
parent 465dbfcdf0
commit c7937cd1d2
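The vendor/ updates below follow mechanically from the go.mod bump. A bump like this is typically regenerated with the standard Go module tooling (the repository's own Makefile targets may wrap these steps), for example:

	go get github.com/containers/image/v5@v5.31.0
	go mod tidy
	go mod vendor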
go.mod | 10 +++++-----

@@ -21,13 +21,13 @@ require (
 	github.com/containerd/containerd v1.7.16
 	github.com/containernetworking/cni v1.1.2
 	github.com/containers/common v0.58.1-0.20240509172903-2c88a3f280bb
-	github.com/containers/image/v5 v5.30.2-0.20240509191815-9318d0eaaf78
+	github.com/containers/image/v5 v5.31.0
 	github.com/containers/luksy v0.0.0-20240506205542-84b50f50f3ee
 	github.com/containers/ocicrypt v1.1.10
-	github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5
+	github.com/containers/storage v1.54.0
 	github.com/cyphar/filepath-securejoin v0.2.5
 	github.com/docker/distribution v2.8.3+incompatible
-	github.com/docker/docker v26.1.2+incompatible
+	github.com/docker/docker v26.1.3+incompatible
 	github.com/docker/go-units v0.5.0
 	github.com/fsouza/go-dockerclient v1.11.0
 	github.com/hashicorp/go-multierror v1.1.1
@@ -62,8 +62,8 @@ require (
 	dario.cat/mergo v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
-	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/Microsoft/hcsshim v0.12.0 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/Microsoft/hcsshim v0.12.3 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect

go.sum | 24 ++++++++++++------------

@@ -10,10 +10,10 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg6
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.12.0 h1:rbICA+XZFwrBef2Odk++0LjFvClNCJGRK+fsrP254Ts=
-github.com/Microsoft/hcsshim v0.12.0/go.mod h1:RZV12pcHCXQ42XnlQ3pz6FZfmrC1C+R4gaOHhRNML1g=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0=
+github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
@@ -63,16 +63,16 @@ github.com/containernetworking/plugins v1.4.1 h1:+sJRRv8PKhLkXIl6tH1D7RMi+CbbHut
 github.com/containernetworking/plugins v1.4.1/go.mod h1:n6FFGKcaY4o2o5msgu/UImtoC+fpQXM3076VHfHbj60=
 github.com/containers/common v0.58.1-0.20240509172903-2c88a3f280bb h1:mb5e8J/kErkytiM1J5hqdZENBJfSQyQ37Cgx0hinVYs=
 github.com/containers/common v0.58.1-0.20240509172903-2c88a3f280bb/go.mod h1:SCOYkp6ul27v6WoNkbgvhAhhSEM6fYKl2My9/WuESdA=
-github.com/containers/image/v5 v5.30.2-0.20240509191815-9318d0eaaf78 h1:1fktdUOKdvMbDbAullFBjslw1VewscLwTjsH2S+6ieM=
-github.com/containers/image/v5 v5.30.2-0.20240509191815-9318d0eaaf78/go.mod h1:nw5UU0qHFIsg+3cj1u1kP/CmwEioiJrVSDgg1QLhirw=
+github.com/containers/image/v5 v5.31.0 h1:eDFVlz5XaYICxe9dXpf23htEKvyosgkl62mJlIATXE4=
+github.com/containers/image/v5 v5.31.0/go.mod h1:5QfOqSackPkSbF7Qxc1DnVNnPJKQ+KWLkfEfDpK590Q=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/luksy v0.0.0-20240506205542-84b50f50f3ee h1:QU6XNrPcxyGejcEYJfpIH7LwB+yXVbb0tWxf7mZxfN4=
 github.com/containers/luksy v0.0.0-20240506205542-84b50f50f3ee/go.mod h1:cEhy3LVQzQqf/BHx0WS6CXmZp+RZZaUKmhQaFZ4NiiU=
 github.com/containers/ocicrypt v1.1.10 h1:r7UR6o8+lyhkEywetubUUgcKFjOWOaWz8cEBrCPX0ic=
 github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
-github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5 h1:owLaLUu/RKf0x62tFm5ZQjU21oRUUIWTRMpZ0zkIt3E=
-github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5/go.mod h1:P4tgJNR/o42wmg+9WZtoJtOJvmZKu2dwzFQggcH9aQw=
+github.com/containers/storage v1.54.0 h1:xwYAlf6n9OnIlURQLLg3FYHbO74fQ/2W2N6EtQEUM4I=
+github.com/containers/storage v1.54.0/go.mod h1:PlMOoinRrBSnhYODLxt4EXl0nmJt+X0kjG0Xdt9fMTw=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -90,12 +90,12 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v26.1.2+incompatible h1:/MWZpUMMlr1hCGyquL8QNbL1hbivQ1kLuT3Z9s1Tlpg=
-github.com/docker/cli v26.1.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc=
+github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k=
-github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
+github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
 github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=

@@ -1,7 +1,3 @@
-run:
-  skip-dirs:
-    - pkg/etw/sample
-
 linters:
   enable:
     # style
@@ -20,9 +16,13 @@ linters:
     - gofmt # files are gofmt'ed
     - gosec # security
     - nilerr # returns nil even with non-nil error
+    - thelper # test helpers without t.Helper()
     - unparam # unused function params
 
 issues:
+  exclude-dirs:
+    - pkg/etw/sample
+
   exclude-rules:
     # err is very often shadowed in nested scopes
     - linters:
@@ -69,9 +69,7 @@ linters-settings:
       # struct order is often for Win32 compat
      # also, ignore pointer bytes/GC issues for now until performance becomes an issue
       - fieldalignment
-    check-shadowing: true
   nolintlint:
-    allow-leading-space: false
     require-explanation: true
     require-specific: true
   revive:

@@ -10,14 +10,14 @@ import (
 	"io"
 	"os"
 	"runtime"
-	"syscall"
 	"unicode/utf16"
 
+	"github.com/Microsoft/go-winio/internal/fs"
 	"golang.org/x/sys/windows"
 )
 
-//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
-//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
+//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
+//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
 
 const (
 	BackupData = uint32(iota + 1)
@@ -104,7 +104,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
 		if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
 			return nil, err
 		}
-		hdr.Name = syscall.UTF16ToString(name)
+		hdr.Name = windows.UTF16ToString(name)
 	}
 	if wsi.StreamID == BackupSparseBlock {
 		if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
@@ -205,7 +205,7 @@ func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
 // Read reads a backup stream from the file by calling the Win32 API BackupRead().
 func (r *BackupFileReader) Read(b []byte) (int, error) {
 	var bytesRead uint32
-	err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+	err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
 	if err != nil {
 		return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
 	}
@@ -220,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
 // the underlying file.
 func (r *BackupFileReader) Close() error {
 	if r.ctx != 0 {
-		_ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+		_ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
 		runtime.KeepAlive(r.f)
 		r.ctx = 0
 	}
@@ -244,7 +244,7 @@ func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
 // Write restores a portion of the file using the provided backup stream.
 func (w *BackupFileWriter) Write(b []byte) (int, error) {
 	var bytesWritten uint32
-	err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+	err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
 	if err != nil {
 		return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
 	}
@@ -259,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
 // close the underlying file.
 func (w *BackupFileWriter) Close() error {
 	if w.ctx != 0 {
-		_ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+		_ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
 		runtime.KeepAlive(w.f)
 		w.ctx = 0
 	}
@@ -271,17 +271,14 @@ func (w *BackupFileWriter) Close() error {
 //
 // If the file opened was a directory, it cannot be used with Readdir().
 func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
-	winPath, err := syscall.UTF16FromString(path)
-	if err != nil {
-		return nil, err
-	}
-	h, err := syscall.CreateFile(&winPath[0],
-		access,
-		share,
+	h, err := fs.CreateFile(path,
+		fs.AccessMask(access),
+		fs.FileShareMode(share),
 		nil,
-		createmode,
-		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT,
-		0)
+		fs.FileCreationDisposition(createmode),
+		fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT,
+		0,
+	)
 	if err != nil {
 		err = &os.PathError{Op: "open", Path: path, Err: err}
 		return nil, err

@@ -11,7 +11,6 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
-	"syscall"
 	"time"
 
 	"github.com/Microsoft/go-winio"
@@ -106,7 +105,7 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
 	hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
 	hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
 
-	if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+	if (fileInfo.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY) != 0 {
 		hdr.Mode |= cISDIR
 		hdr.Size = 0
 		hdr.Typeflag = tar.TypeDir
@@ -378,7 +377,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
 // WriteTarFileFromBackupStream.
 func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
 	name = hdr.Name
-	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+	if hdr.Typeflag == tar.TypeReg {
 		size = hdr.Size
 	}
 	fileInfo = &winio.FileBasicInfo{
@@ -396,7 +395,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
 		fileInfo.FileAttributes = uint32(attr)
 	} else {
 		if hdr.Typeflag == tar.TypeDir {
-			fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
+			fileInfo.FileAttributes |= windows.FILE_ATTRIBUTE_DIRECTORY
 		}
 	}
 	if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
@@ -469,7 +468,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
 		}
 	}
 
-	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+	if hdr.Typeflag == tar.TypeReg {
 		bhdr := winio.BackupHeader{
 			Id:   winio.BackupData,
 			Size: hdr.Size,

@@ -15,26 +15,11 @@ import (
 	"golang.org/x/sys/windows"
 )
 
-//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
-//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
-//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
-//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
-//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
-
-type atomicBool int32
-
-func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
-func (b *atomicBool) setFalse()   { atomic.StoreInt32((*int32)(b), 0) }
-func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }
-
-//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg
-func (b *atomicBool) swap(new bool) bool {
-	var newInt int32
-	if new {
-		newInt = 1
-	}
-	return atomic.SwapInt32((*int32)(b), newInt) == 1
-}
+//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx
+//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort
+//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
+//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
 
 var (
 	ErrFileClosed = errors.New("file has already been closed")
@@ -50,7 +35,7 @@ func (*timeoutError) Temporary() bool { return true }
 type timeoutChan chan struct{}
 
 var ioInitOnce sync.Once
-var ioCompletionPort syscall.Handle
+var ioCompletionPort windows.Handle
 
 // ioResult contains the result of an asynchronous IO operation.
 type ioResult struct {
@@ -60,12 +45,12 @@ type ioResult struct {
 
 // ioOperation represents an outstanding asynchronous Win32 IO.
 type ioOperation struct {
-	o  syscall.Overlapped
+	o  windows.Overlapped
 	ch chan ioResult
 }
 
 func initIO() {
-	h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
+	h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
 	if err != nil {
 		panic(err)
 	}
@@ -76,10 +61,10 @@ func initIO() {
 // win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
 // It takes ownership of this handle and will close it if it is garbage collected.
 type win32File struct {
-	handle        syscall.Handle
+	handle        windows.Handle
 	wg            sync.WaitGroup
 	wgLock        sync.RWMutex
-	closing       atomicBool
+	closing       atomic.Bool
 	socket        bool
 	readDeadline  deadlineHandler
 	writeDeadline deadlineHandler
@@ -90,11 +75,11 @@ type deadlineHandler struct {
 	channel     timeoutChan
 	channelLock sync.RWMutex
 	timer       *time.Timer
-	timedout    atomicBool
+	timedout    atomic.Bool
 }
 
 // makeWin32File makes a new win32File from an existing file handle.
-func makeWin32File(h syscall.Handle) (*win32File, error) {
+func makeWin32File(h windows.Handle) (*win32File, error) {
 	f := &win32File{handle: h}
 	ioInitOnce.Do(initIO)
 	_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
@@ -110,7 +95,12 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
 	return f, nil
 }
 
+// Deprecated: use NewOpenFile instead.
 func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
+	return NewOpenFile(windows.Handle(h))
+}
+
+func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) {
 	// If we return the result of makeWin32File directly, it can result in an
 	// interface-wrapped nil, rather than a nil interface value.
 	f, err := makeWin32File(h)
@@ -124,13 +114,13 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
 func (f *win32File) closeHandle() {
 	f.wgLock.Lock()
 	// Atomically set that we are closing, releasing the resources only once.
-	if !f.closing.swap(true) {
+	if !f.closing.Swap(true) {
 		f.wgLock.Unlock()
 		// cancel all IO and wait for it to complete
 		_ = cancelIoEx(f.handle, nil)
 		f.wg.Wait()
 		// at this point, no new IO can start
-		syscall.Close(f.handle)
+		windows.Close(f.handle)
 		f.handle = 0
 	} else {
 		f.wgLock.Unlock()
@@ -145,14 +135,14 @@ func (f *win32File) Close() error {
 
 // IsClosed checks if the file has been closed.
 func (f *win32File) IsClosed() bool {
-	return f.closing.isSet()
+	return f.closing.Load()
 }
 
 // prepareIO prepares for a new IO operation.
 // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
 func (f *win32File) prepareIO() (*ioOperation, error) {
 	f.wgLock.RLock()
-	if f.closing.isSet() {
+	if f.closing.Load() {
 		f.wgLock.RUnlock()
 		return nil, ErrFileClosed
 	}
@@ -164,12 +154,12 @@ func (f *win32File) prepareIO() (*ioOperation, error) {
 }
 
 // ioCompletionProcessor processes completed async IOs forever.
-func ioCompletionProcessor(h syscall.Handle) {
+func ioCompletionProcessor(h windows.Handle) {
 	for {
 		var bytes uint32
 		var key uintptr
 		var op *ioOperation
-		err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
+		err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE)
 		if op == nil {
 			panic(err)
 		}
@@ -182,11 +172,11 @@ func ioCompletionProcessor(h syscall.Handle) {
 // asyncIO processes the return value from ReadFile or WriteFile, blocking until
 // the operation has actually completed.
 func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
-	if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
+	if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
 		return int(bytes), err
 	}
 
-	if f.closing.isSet() {
+	if f.closing.Load() {
 		_ = cancelIoEx(f.handle, &c.o)
 	}
 
@@ -201,8 +191,8 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er
 	select {
 	case r = <-c.ch:
 		err = r.err
-		if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
-			if f.closing.isSet() {
+		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+			if f.closing.Load() {
 				err = ErrFileClosed
 			}
 		} else if err != nil && f.socket {
@@ -214,7 +204,7 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er
 		_ = cancelIoEx(f.handle, &c.o)
 		r = <-c.ch
 		err = r.err
-		if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
 			err = ErrTimeout
 		}
 	}
@@ -235,23 +225,22 @@ func (f *win32File) Read(b []byte) (int, error) {
 	}
 	defer f.wg.Done()
 
-	if f.readDeadline.timedout.isSet() {
+	if f.readDeadline.timedout.Load() {
 		return 0, ErrTimeout
 	}
 
 	var bytes uint32
-	err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
+	err = windows.ReadFile(f.handle, b, &bytes, &c.o)
 	n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
 	runtime.KeepAlive(b)
 
 	// Handle EOF conditions.
 	if err == nil && n == 0 && len(b) != 0 {
 		return 0, io.EOF
-	} else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
+	} else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
 		return 0, io.EOF
-	} else {
-		return n, err
 	}
+	return n, err
 }
 
 // Write writes to a file handle.
@@ -262,12 +251,12 @@ func (f *win32File) Write(b []byte) (int, error) {
 	}
 	defer f.wg.Done()
 
-	if f.writeDeadline.timedout.isSet() {
+	if f.writeDeadline.timedout.Load() {
 		return 0, ErrTimeout
 	}
 
 	var bytes uint32
-	err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
+	err = windows.WriteFile(f.handle, b, &bytes, &c.o)
 	n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
 	runtime.KeepAlive(b)
 	return n, err
@@ -282,7 +271,7 @@ func (f *win32File) SetWriteDeadline(deadline time.Time) error {
 }
 
 func (f *win32File) Flush() error {
-	return syscall.FlushFileBuffers(f.handle)
+	return windows.FlushFileBuffers(f.handle)
 }
 
 func (f *win32File) Fd() uintptr {
@@ -299,7 +288,7 @@ func (d *deadlineHandler) set(deadline time.Time) error {
 		}
 		d.timer = nil
 	}
-	d.timedout.setFalse()
+	d.timedout.Store(false)
 
 	select {
 	case <-d.channel:
@@ -314,7 +303,7 @@ func (d *deadlineHandler) set(deadline time.Time) error {
 	}
 
 	timeoutIO := func() {
-		d.timedout.setTrue()
+		d.timedout.Store(true)
 		close(d.channel)
 	}
 

@@ -18,9 +18,18 @@ type FileBasicInfo struct {
 	_ uint32 // padding
 }
 
+// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing
+// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64
+// alignment is necessary to pass this as FILE_BASIC_INFO.
+type alignedFileBasicInfo struct {
+	CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
+	FileAttributes                                          uint32
+	_                                                       uint32 // padding
+}
+
 // GetFileBasicInfo retrieves times and attributes for a file.
 func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
-	bi := &FileBasicInfo{}
+	bi := &alignedFileBasicInfo{}
 	if err := windows.GetFileInformationByHandleEx(
 		windows.Handle(f.Fd()),
 		windows.FileBasicInfo,
@@ -30,16 +39,21 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
 		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
 	}
 	runtime.KeepAlive(f)
-	return bi, nil
+	// Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the
+	// public API of this module. The data may be unnecessarily aligned.
+	return (*FileBasicInfo)(unsafe.Pointer(bi)), nil
 }
 
 // SetFileBasicInfo sets times and attributes for a file.
 func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+	// Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is
+	// suitable to pass to GetFileInformationByHandleEx.
+	biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi))
 	if err := windows.SetFileInformationByHandle(
 		windows.Handle(f.Fd()),
 		windows.FileBasicInfo,
-		(*byte)(unsafe.Pointer(bi)),
-		uint32(unsafe.Sizeof(*bi)),
+		(*byte)(unsafe.Pointer(&biAligned)),
+		uint32(unsafe.Sizeof(biAligned)),
 	); err != nil {
 		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
 	}

@@ -10,7 +10,6 @@ import (
 	"io"
 	"net"
 	"os"
-	"syscall"
 	"time"
 	"unsafe"
 
@@ -181,13 +180,13 @@ type HvsockConn struct {
 var _ net.Conn = &HvsockConn{}
 
 func newHVSocket() (*win32File, error) {
-	fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1)
+	fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1)
 	if err != nil {
 		return nil, os.NewSyscallError("socket", err)
 	}
 	f, err := makeWin32File(fd)
 	if err != nil {
-		syscall.Close(fd)
+		windows.Close(fd)
 		return nil, err
 	}
 	f.socket = true
@@ -197,16 +196,24 @@ func newHVSocket() (*win32File, error) {
 // ListenHvsock listens for connections on the specified hvsock address.
 func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
 	l := &HvsockListener{addr: *addr}
-	sock, err := newHVSocket()
+
+	var sock *win32File
+	sock, err = newHVSocket()
 	if err != nil {
 		return nil, l.opErr("listen", err)
 	}
+	defer func() {
+		if err != nil {
+			_ = sock.Close()
+		}
+	}()
+
 	sa := addr.raw()
-	err = socket.Bind(windows.Handle(sock.handle), &sa)
+	err = socket.Bind(sock.handle, &sa)
 	if err != nil {
 		return nil, l.opErr("listen", os.NewSyscallError("socket", err))
 	}
-	err = syscall.Listen(sock.handle, 16)
+	err = windows.Listen(sock.handle, 16)
 	if err != nil {
 		return nil, l.opErr("listen", os.NewSyscallError("listen", err))
 	}
@@ -246,7 +253,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
 	var addrbuf [addrlen * 2]byte
 
 	var bytes uint32
-	err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
+	err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
 	if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
 		return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
 	}
@@ -263,7 +270,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
 	conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
 
 	// initialize the accepted socket and update its properties with those of the listening socket
-	if err = windows.Setsockopt(windows.Handle(sock.handle),
+	if err = windows.Setsockopt(sock.handle,
 		windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
 		(*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
 		return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
@@ -334,7 +341,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock
 	}()
 
 	sa := addr.raw()
-	err = socket.Bind(windows.Handle(sock.handle), &sa)
+	err = socket.Bind(sock.handle, &sa)
 	if err != nil {
 		return nil, conn.opErr(op, os.NewSyscallError("bind", err))
 	}
@@ -347,7 +354,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock
 	var bytes uint32
 	for i := uint(0); i <= d.Retries; i++ {
 		err = socket.ConnectEx(
-			windows.Handle(sock.handle),
+			sock.handle,
 			&sa,
 			nil, // sendBuf
 			0,   // sendDataLen
@@ -367,7 +374,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock
 
 	// update the connection properties, so shutdown can be used
 	if err = windows.Setsockopt(
-		windows.Handle(sock.handle),
+		sock.handle,
 		windows.SOL_SOCKET,
 		windows.SO_UPDATE_CONNECT_CONTEXT,
 		nil, // optvalue
@@ -378,7 +385,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock
 
 	// get the local name
 	var sal rawHvsockAddr
-	err = socket.GetSockName(windows.Handle(sock.handle), &sal)
+	err = socket.GetSockName(sock.handle, &sal)
 	if err != nil {
 		return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
 	}
@@ -421,7 +428,7 @@ func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
 	return ctx.Err()
 }
 
-// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall.
+// assumes error is a plain, unwrapped windows.Errno provided by direct syscall.
 func canRedial(err error) bool {
 	//nolint:errorlint // guaranteed to be an Errno
 	switch err {
@@ -447,9 +454,9 @@ func (conn *HvsockConn) Read(b []byte) (int, error) {
 		return 0, conn.opErr("read", err)
 	}
 	defer conn.sock.wg.Done()
-	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
 	var flags, bytes uint32
-	err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
+	err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
 	n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
 	if err != nil {
 		var eno windows.Errno
@@ -482,9 +489,9 @@ func (conn *HvsockConn) write(b []byte) (int, error) {
 		return 0, conn.opErr("write", err)
 	}
 	defer conn.sock.wg.Done()
-	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
 	var bytes uint32
-	err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
+	err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
 	n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
 	if err != nil {
 		var eno windows.Errno
@@ -511,7 +518,7 @@ func (conn *HvsockConn) shutdown(how int) error {
 		return socket.ErrSocketClosed
 	}
 
-	err := syscall.Shutdown(conn.sock.handle, how)
+	err := windows.Shutdown(conn.sock.handle, how)
 	if err != nil {
 		// If the connection was closed, shutdowns fail with "not connected"
 		if errors.Is(err, windows.WSAENOTCONN) ||
@@ -525,7 +532,7 @@ func (conn *HvsockConn) shutdown(how int) error {
 
 // CloseRead shuts down the read end of the socket, preventing future read operations.
 func (conn *HvsockConn) CloseRead() error {
-	err := conn.shutdown(syscall.SHUT_RD)
+	err := conn.shutdown(windows.SHUT_RD)
 	if err != nil {
 		return conn.opErr("closeread", err)
 	}
@@ -535,7 +542,7 @@ func (conn *HvsockConn) CloseRead() error {
 // CloseWrite shuts down the write end of the socket, preventing future write operations and
 // notifying the other endpoint that no more data will be written.
 func (conn *HvsockConn) CloseWrite() error {
-	err := conn.shutdown(syscall.SHUT_WR)
+	err := conn.shutdown(windows.SHUT_WR)
 	if err != nil {
 		return conn.opErr("closewrite", err)
 	}

@@ -11,12 +11,14 @@ import (
 //go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
 
 // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
-//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
+//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
 
 const NullHandle windows.Handle = 0
 
 // AccessMask defines standard, specific, and generic rights.
 //
+// Used with CreateFile and NtCreateFile (and co.).
+//
 // Bitmask:
 //	 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 //	 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -47,6 +49,12 @@ const (
 	// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
 	FILE_ANY_ACCESS AccessMask = 0
 
+	GENERIC_READ           AccessMask = 0x8000_0000
+	GENERIC_WRITE          AccessMask = 0x4000_0000
+	GENERIC_EXECUTE        AccessMask = 0x2000_0000
+	GENERIC_ALL            AccessMask = 0x1000_0000
+	ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000
+
 	// Specific Object Access
 	// from ntioapi.h
 
@@ -124,14 +132,32 @@ const (
 	TRUNCATE_EXISTING FileCreationDisposition = 0x05
 )
 
+// Create disposition values for NtCreate*
+type NTFileCreationDisposition uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// From ntioapi.h
+
+	FILE_SUPERSEDE           NTFileCreationDisposition = 0x00
+	FILE_OPEN                NTFileCreationDisposition = 0x01
+	FILE_CREATE              NTFileCreationDisposition = 0x02
+	FILE_OPEN_IF             NTFileCreationDisposition = 0x03
+	FILE_OVERWRITE           NTFileCreationDisposition = 0x04
+	FILE_OVERWRITE_IF        NTFileCreationDisposition = 0x05
+	FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05
+)
+
 // CreateFile and co. take flags or attributes together as one parameter.
 // Define alias until we can use generics to allow both
+//
 // https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
 type FileFlagOrAttribute uint32
 
 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
-const ( // from winnt.h
+const (
+	// from winnt.h
+
 	FILE_FLAG_WRITE_THROUGH       FileFlagOrAttribute = 0x8000_0000
 	FILE_FLAG_OVERLAPPED          FileFlagOrAttribute = 0x4000_0000
 	FILE_FLAG_NO_BUFFERING        FileFlagOrAttribute = 0x2000_0000
@@ -145,17 +171,51 @@ const ( // from winnt.h
 	FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
 )
 
+// NtCreate* functions take a dedicated CreateOptions parameter.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile
+//
+// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file
+type NTCreateOptions uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// From ntioapi.h
+
+	FILE_DIRECTORY_FILE            NTCreateOptions = 0x0000_0001
+	FILE_WRITE_THROUGH             NTCreateOptions = 0x0000_0002
+	FILE_SEQUENTIAL_ONLY           NTCreateOptions = 0x0000_0004
+	FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008
+
+	FILE_SYNCHRONOUS_IO_ALERT    NTCreateOptions = 0x0000_0010
+	FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020
+	FILE_NON_DIRECTORY_FILE      NTCreateOptions = 0x0000_0040
+	FILE_CREATE_TREE_CONNECTION  NTCreateOptions = 0x0000_0080
+
+	FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100
+	FILE_NO_EA_KNOWLEDGE      NTCreateOptions = 0x0000_0200
+	FILE_DISABLE_TUNNELING    NTCreateOptions = 0x0000_0400
+	FILE_RANDOM_ACCESS        NTCreateOptions = 0x0000_0800
+
+	FILE_DELETE_ON_CLOSE        NTCreateOptions = 0x0000_1000
+	FILE_OPEN_BY_FILE_ID        NTCreateOptions = 0x0000_2000
+	FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000
+	FILE_NO_COMPRESSION         NTCreateOptions = 0x0000_8000
+)
+
 type FileSQSFlag = FileFlagOrAttribute
 
 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
-const ( // from winbase.h
+const (
+	// from winbase.h
+
 	SECURITY_ANONYMOUS      FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
 	SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
 	SECURITY_IMPERSONATION  FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
 	SECURITY_DELEGATION     FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
 
-	SECURITY_SQOS_PRESENT     FileSQSFlag = 0x00100000
-	SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000
+	SECURITY_SQOS_PRESENT     FileSQSFlag = 0x0010_0000
+	SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000
 )
 
 // GetFinalPathNameByHandle flags

@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
	return e
}

@@ -45,7 +42,7 @@ var (
	procCreateFileW = modkernel32.NewProc("CreateFileW")
)

-func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(name)
	if err != nil {

@@ -54,8 +51,8 @@ func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.
	return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}

-func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
-	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
	handle = windows.Handle(r0)
	if handle == windows.InvalidHandle {
		err = errnoErr(e1)
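The mechanical change running through these generated stubs is the move from the fixed-arity syscall.Syscall/Syscall6/Syscall9 helpers to syscall.SyscallN, which takes its argument count from the call itself, so the explicit "nargs" value and trailing zero padding disappear. A minimal sketch of the new call shape, assuming an arbitrary kernel32 export (GetTickCount64) purely for illustration:

	//go:build windows

	package main

	import (
		"fmt"
		"syscall"

		"golang.org/x/sys/windows"
	)

	var (
		modkernel32       = windows.NewLazySystemDLL("kernel32.dll")
		procGetTickCount64 = modkernel32.NewProc("GetTickCount64")
	)

	func main() {
		// SyscallN pads unused registers itself; no argument count or trailing zeros needed.
		r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
		fmt.Println("uptime ms:", uint64(r0))
	}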
@@ -156,9 +156,7 @@ func connectEx(
	bytesSent *uint32,
	overlapped *windows.Overlapped,
) (err error) {
-	// todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN
-	r1, _, e1 := syscall.Syscall9(connectExFunc.addr,
-		7,
+	r1, _, e1 := syscall.SyscallN(connectExFunc.addr,
		uintptr(s),
		uintptr(name),
		uintptr(namelen),

@@ -166,8 +164,8 @@ func connectEx(
		uintptr(sendDataLen),
		uintptr(unsafe.Pointer(bytesSent)),
		uintptr(unsafe.Pointer(overlapped)),
-		0,
-		0)
+	)
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
	return e
}

@@ -48,7 +45,7 @@ var (
)

func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+	r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
	if r1 == socketError {
		err = errnoErr(e1)
	}

@@ -56,7 +53,7 @@ func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
}

func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+	r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
	if r1 == socketError {
		err = errnoErr(e1)
	}

@@ -64,7 +61,7 @@ func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err err
}

func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+	r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
	if r1 == socketError {
		err = errnoErr(e1)
	}
@@ -62,7 +62,7 @@ func (b *WString) Free() {
// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
// previous buffer back into pool.
func (b *WString) ResizeTo(c uint32) uint32 {
-	// allready sufficient (or n is 0)
+	// already sufficient (or n is 0)
	if c <= b.Cap() {
		return b.Cap()
	}
@@ -11,7 +11,6 @@ import (
	"net"
	"os"
	"runtime"
-	"syscall"
	"time"
	"unsafe"

@@ -20,20 +19,44 @@ import (
	"github.com/Microsoft/go-winio/internal/fs"
)

-//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
+//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe
-//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
+//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW
-//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
-//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
-//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
-//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
+//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe
+//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
+//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
+//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl

+type PipeConn interface {
+	net.Conn
+	Disconnect() error
+	Flush() error
+}
+
+// type aliases for mkwinsyscall code
+type (
+	ntAccessMask = fs.AccessMask
+	ntFileShareMode = fs.FileShareMode
+	ntFileCreationDisposition = fs.NTFileCreationDisposition
+	ntFileOptions = fs.NTCreateOptions
+)
+
type ioStatusBlock struct {
	Status, Information uintptr
}

+// typedef struct _OBJECT_ATTRIBUTES {
+// ULONG Length;
+// HANDLE RootDirectory;
+// PUNICODE_STRING ObjectName;
+// ULONG Attributes;
+// PVOID SecurityDescriptor;
+// PVOID SecurityQualityOfService;
+// } OBJECT_ATTRIBUTES;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes
type objectAttributes struct {
	Length uintptr
	RootDirectory uintptr
@@ -49,6 +72,17 @@ type unicodeString struct {
	Buffer uintptr
}

+// typedef struct _SECURITY_DESCRIPTOR {
+// BYTE Revision;
+// BYTE Sbz1;
+// SECURITY_DESCRIPTOR_CONTROL Control;
+// PSID Owner;
+// PSID Group;
+// PACL Sacl;
+// PACL Dacl;
+// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor
type securityDescriptor struct {
	Revision byte
	Sbz1 byte
@@ -80,6 +114,8 @@ type win32Pipe struct {
	path string
}

+var _ PipeConn = (*win32Pipe)(nil)
+
type win32MessageBytePipe struct {
	win32Pipe
	writeClosed bool
@@ -103,6 +139,10 @@ func (f *win32Pipe) SetDeadline(t time.Time) error {
	return f.SetWriteDeadline(t)
}

+func (f *win32Pipe) Disconnect() error {
+	return disconnectNamedPipe(f.win32File.handle)
+}
+
// CloseWrite closes the write side of a message pipe in byte mode.
func (f *win32MessageBytePipe) CloseWrite() error {
	if f.writeClosed {
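Disconnect is what makes the new PipeConn interface useful on the server side: after Accept, a server can assert the connection to winio.PipeConn, flush buffered data to the client, and then detach the client from the pipe instance without closing the handle. A hedged sketch; the pipe name is made up and error handling is minimal:

	package main

	import (
		"log"

		"github.com/Microsoft/go-winio"
	)

	func main() {
		l, err := winio.ListenPipe(`\\.\pipe\example-pipe`, nil) // hypothetical pipe name
		if err != nil {
			log.Fatal(err)
		}
		defer l.Close()

		c, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		pc, ok := c.(winio.PipeConn) // server connections satisfy the new interface
		if !ok {
			log.Fatal("not a named pipe connection")
		}
		if _, err := pc.Write([]byte("bye\n")); err != nil {
			log.Fatal(err)
		}
		_ = pc.Flush()      // push buffered data to the client side
		_ = pc.Disconnect() // then detach the client from this pipe instance
	}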
@@ -146,7 +186,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
		// zero-byte message, ensure that all future Read() calls
		// also return EOF.
		f.readEOF = true
-	} else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
+	} else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
		// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
		// and the message still has more bytes. Treat this as a success, since
		// this package presents all named pipes as byte streams.
@@ -164,21 +204,20 @@ func (s pipeAddress) String() string {
}

// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
-func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) {
+func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) {
	for {
		select {
		case <-ctx.Done():
-			return syscall.Handle(0), ctx.Err()
+			return windows.Handle(0), ctx.Err()
		default:
-			wh, err := fs.CreateFile(*path,
+			h, err := fs.CreateFile(*path,
				access,
				0, // mode
				nil, // security attributes
				fs.OPEN_EXISTING,
-				fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS,
+				fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel),
				0, // template file handle
			)
-			h := syscall.Handle(wh)
			if err == nil {
				return h, nil
			}
@@ -214,15 +253,33 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
-	return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
+	return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE))
}

+// PipeImpLevel is an enumeration of impersonation levels that may be set
+// when calling DialPipeAccessImpersonation.
+type PipeImpLevel uint32
+
+const (
+	PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS)
+	PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION)
+	PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION)
+	PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION)
+)
+
// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
// cancellation or timeout.
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
+	return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous)
+}
+
+// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with
+// `access` at `impLevel` until `ctx` cancellation or timeout. The other
+// DialPipe* implementations use PipeImpLevelAnonymous.
+func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) {
	var err error
-	var h syscall.Handle
+	var h windows.Handle
-	h, err = tryDialPipe(ctx, &path, fs.AccessMask(access))
+	h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel)
	if err != nil {
		return nil, err
	}
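On the client side, the new DialPipeAccessImpLevel lets a caller state how much of its identity the pipe server may assume when it impersonates the connection; the existing DialPipe, DialPipeContext and DialPipeAccess entry points keep their old behaviour by delegating with PipeImpLevelAnonymous. A hedged sketch; the pipe name and access mask are illustrative only:

	package main

	import (
		"context"
		"log"
		"time"

		"github.com/Microsoft/go-winio"
		"golang.org/x/sys/windows"
	)

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		// Allow the server to identify us, but not to impersonate or delegate.
		conn, err := winio.DialPipeAccessImpLevel(ctx, `\\.\pipe\example-pipe`,
			uint32(windows.GENERIC_READ|windows.GENERIC_WRITE),
			winio.PipeImpLevelIdentification)
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
	}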
@@ -235,7 +292,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn,

	f, err := makeWin32File(h)
	if err != nil {
-		syscall.Close(h)
+		windows.Close(h)
		return nil, err
	}

@@ -255,7 +312,7 @@ type acceptResponse struct {
}

type win32PipeListener struct {
-	firstHandle syscall.Handle
+	firstHandle windows.Handle
	path string
	config PipeConfig
	acceptCh chan (chan acceptResponse)
@@ -263,8 +320,8 @@ type win32PipeListener struct {
	doneCh chan int
}

-func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
+func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) {
-	path16, err := syscall.UTF16FromString(path)
+	path16, err := windows.UTF16FromString(path)
	if err != nil {
		return 0, &os.PathError{Op: "open", Path: path, Err: err}
	}
@@ -280,16 +337,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
	).Err(); err != nil {
		return 0, &os.PathError{Op: "open", Path: path, Err: err}
	}
-	defer localFree(ntPath.Buffer)
+	defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck
	oa.ObjectName = &ntPath
	oa.Attributes = windows.OBJ_CASE_INSENSITIVE

	// The security descriptor is only needed for the first pipe.
	if first {
		if sd != nil {
+			//todo: does `sdb` need to be allocated on the heap, or can go allocate it?
			l := uint32(len(sd))
-			sdb := localAlloc(0, l)
-			defer localFree(sdb)
+			sdb, err := windows.LocalAlloc(0, l)
+			if err != nil {
+				return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err)
+			}
+			defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck
			copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
			oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
		} else {
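Replacing the private localAlloc/localFree stubs with windows.LocalAlloc and windows.LocalFree also adds the error check that the old uintptr-returning stub silently skipped. The same pattern as a stand-alone hedged sketch; copyToLocalAlloc is a made-up helper, not go-winio API:

	//go:build windows

	package main

	import (
		"fmt"
		"unsafe"

		"golang.org/x/sys/windows"
	)

	// copyToLocalAlloc copies b into a LocalAlloc'd buffer and returns its address.
	func copyToLocalAlloc(b []byte) (uintptr, error) {
		l := uint32(len(b))
		p, err := windows.LocalAlloc(0, l) // 0 == LMEM_FIXED
		if err != nil {
			return 0, fmt.Errorf("LocalAlloc(%d bytes): %w", l, err)
		}
		copy(unsafe.Slice((*byte)(unsafe.Pointer(p)), l), b)
		return p, nil
	}

	func main() {
		p, err := copyToLocalAlloc([]byte("hello"))
		if err != nil {
			panic(err)
		}
		defer windows.LocalFree(windows.Handle(p)) //nolint:errcheck
		fmt.Printf("allocated at %#x\n", p)
	}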
@@ -298,7 +359,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
			if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
				return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
			}
-			defer localFree(dacl)
+			defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck

			sdb := &securityDescriptor{
				Revision: 1,
@@ -314,27 +375,27 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
		typ |= windows.FILE_PIPE_MESSAGE_TYPE
	}

-	disposition := uint32(windows.FILE_OPEN)
+	disposition := fs.FILE_OPEN
-	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
+	access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE
	if first {
-		disposition = windows.FILE_CREATE
+		disposition = fs.FILE_CREATE
		// By not asking for read or write access, the named pipe file system
		// will put this pipe into an initially disconnected state, blocking
		// client connections until the next call with first == false.
-		access = syscall.SYNCHRONIZE
+		access = fs.SYNCHRONIZE
	}

	timeout := int64(-50 * 10000) // 50ms

	var (
-		h syscall.Handle
+		h windows.Handle
		iosb ioStatusBlock
	)
	err = ntCreateNamedPipeFile(&h,
		access,
		&oa,
		&iosb,
-		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE,
+		fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE,
		disposition,
		0,
		typ,
@@ -359,7 +420,7 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
	}
	f, err := makeWin32File(h)
	if err != nil {
-		syscall.Close(h)
+		windows.Close(h)
		return nil, err
	}
	return f, nil

@@ -418,7 +479,7 @@ func (l *win32PipeListener) listenerRoutine() {
			closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
		}
	}
-	syscall.Close(l.firstHandle)
+	windows.Close(l.firstHandle)
	l.firstHandle = 0
	// Notify Close() and Accept() callers that the handle has been closed.
	close(l.doneCh)
@@ -9,7 +9,6 @@ import (
	"fmt"
	"runtime"
	"sync"
-	"syscall"
	"unicode/utf16"

	"golang.org/x/sys/windows"

@@ -18,8 +17,8 @@ import (
//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
//sys revertToSelf() (err error) = advapi32.RevertToSelf
-//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
+//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
-//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
+//sys getCurrentThread() (h windows.Handle) = GetCurrentThread
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
@@ -29,7 +28,7 @@ const (
	SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED

	//revive:disable-next-line:var-naming ALL_CAPS
-	ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED
+	ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED

	SeBackupPrivilege = "SeBackupPrivilege"
	SeRestorePrivilege = "SeRestorePrivilege"

@@ -177,7 +176,7 @@ func newThreadToken() (windows.Token, error) {
	}

	var token windows.Token
-	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+	err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token)
	if err != nil {
		rerr := revertToSelf()
		if rerr != nil {
@@ -5,7 +5,7 @@ package winio

import (
	"errors"
-	"syscall"
+	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"

@@ -15,10 +15,6 @@ import (
//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW
-//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
-//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
-//sys localFree(mem uintptr) = LocalFree
-//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength

type AccountLookupError struct {
	Name string
@@ -64,7 +60,7 @@ func LookupSidByName(name string) (sid string, err error) {

	var sidSize, sidNameUse, refDomainSize uint32
	err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
-	if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
+	if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
		return "", &AccountLookupError{name, err}
	}
	sidBuffer := make([]byte, sidSize)

@@ -78,8 +74,8 @@ func LookupSidByName(name string) (sid string, err error) {
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
-	sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+	sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
-	localFree(uintptr(unsafe.Pointer(strBuffer)))
+	_, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer)))
	return sid, nil
}

@@ -100,7 +96,7 @@ func LookupNameBySid(sid string) (name string, err error) {
	if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
		return "", &AccountLookupError{sid, err}
	}
-	defer localFree(uintptr(unsafe.Pointer(sidPtr)))
+	defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck

	var nameSize, refDomainSize, sidNameUse uint32
	err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
@@ -120,25 +116,18 @@ func LookupNameBySid(sid string) (name string, err error) {
}

func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
-	var sdBuffer uintptr
-	err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
+	sd, err := windows.SecurityDescriptorFromString(sddl)
	if err != nil {
-		return nil, &SddlConversionError{sddl, err}
+		return nil, &SddlConversionError{Sddl: sddl, Err: err}
	}
-	defer localFree(sdBuffer)
-	sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
-	copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
-	return sd, nil
+	b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
+	return b, nil
}

func SecurityDescriptorToSddl(sd []byte) (string, error) {
-	var sddl *uint16
-	// The returned string length seems to include an arbitrary number of terminating NULs.
-	// Don't use it.
-	err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
-	if err != nil {
-		return "", err
+	if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l {
+		return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE)
	}
-	defer localFree(uintptr(unsafe.Pointer(sddl)))
-	return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
+	s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0]))
+	return s.String(), nil
}
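SddlToSecurityDescriptor and SecurityDescriptorToSddl keep their signatures, but are now thin wrappers over windows.SecurityDescriptorFromString and (*windows.SECURITY_DESCRIPTOR).String rather than hand-rolled advapi32 calls. A hedged round-trip sketch; the SDDL string is an arbitrary example policy and the code is Windows-only:

	//go:build windows

	package main

	import (
		"fmt"
		"log"

		"github.com/Microsoft/go-winio"
	)

	func main() {
		// Grant GENERIC_ALL to Everyone; purely an example policy string.
		const sddl = "D:P(A;;GA;;;WD)"

		sd, err := winio.SddlToSecurityDescriptor(sddl)
		if err != nil {
			log.Fatal(err)
		}
		back, err := winio.SecurityDescriptorToSddl(sd)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%d-byte self-relative descriptor, SDDL: %s\n", len(sd), back)
	}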
@@ -1,5 +0,0 @@
-//go:build tools
-
-package winio
-
-import _ "golang.org/x/tools/cmd/stringer"
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
	return e
}

@@ -50,7 +47,7 @@ var (
)

func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) {
-	r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
+	r0, _, _ := syscall.SyscallN(procAttachVirtualDisk.Addr(), uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
	if r0 != 0 {
		win32err = syscall.Errno(r0)
	}

@@ -67,7 +64,7 @@ func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virt
}

func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) {
-	r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
+	r0, _, _ := syscall.SyscallN(procCreateVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
	if r0 != 0 {
		win32err = syscall.Errno(r0)
	}

@@ -75,7 +72,7 @@ func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, vi
}

func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) {
-	r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
+	r0, _, _ := syscall.SyscallN(procDetachVirtualDisk.Addr(), uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
	if r0 != 0 {
		win32err = syscall.Errno(r0)
	}

@@ -83,7 +80,7 @@ func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, pro
}

func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) {
-	r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
+	r0, _, _ := syscall.SyscallN(procGetVirtualDiskPhysicalPath.Addr(), uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
	if r0 != 0 {
		win32err = syscall.Errno(r0)
	}

@@ -100,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua
}

func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
-	r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
+	r0, _, _ := syscall.SyscallN(procOpenVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
	if r0 != 0 {
		win32err = syscall.Errno(r0)
	}
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
	return e
}

@@ -45,38 +42,34 @@ var (
	modntdll = windows.NewLazySystemDLL("ntdll.dll")
	modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")

	procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
-	procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
-	procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
-	procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
-	procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
-	procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
-	procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
-	procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
-	procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
-	procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
-	procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
-	procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
-	procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
-	procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
-	procBackupRead = modkernel32.NewProc("BackupRead")
-	procBackupWrite = modkernel32.NewProc("BackupWrite")
-	procCancelIoEx = modkernel32.NewProc("CancelIoEx")
-	procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
-	procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
-	procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
-	procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
-	procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
-	procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
-	procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
-	procLocalAlloc = modkernel32.NewProc("LocalAlloc")
-	procLocalFree = modkernel32.NewProc("LocalFree")
-	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
-	procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
-	procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
-	procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
-	procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
-	procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
+	procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
+	procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
+	procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
+	procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
+	procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
+	procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
+	procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
+	procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
+	procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
+	procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
+	procBackupRead = modkernel32.NewProc("BackupRead")
+	procBackupWrite = modkernel32.NewProc("BackupWrite")
+	procCancelIoEx = modkernel32.NewProc("CancelIoEx")
+	procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
+	procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
+	procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
+	procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe")
+	procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
+	procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
+	procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
+	procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
+	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+	procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
+	procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
+	procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
+	procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+	procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
)

func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
@@ -84,7 +77,7 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou
	if releaseAll {
		_p0 = 1
	}
-	r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
+	r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
	success = r0 != 0
	if true {
		err = errnoErr(e1)
@@ -92,33 +85,8 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou
	return
}

-func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
-	if r1 == 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
-	if r1 == 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
-	var _p0 *uint16
-	_p0, err = syscall.UTF16PtrFromString(str)
-	if err != nil {
-		return
-	}
-	return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
-}
-
-func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
@@ -126,21 +94,15 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
}

func convertStringSidToSid(str *uint16, sid **byte) (err error) {
-	r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0)
+	r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

-func getSecurityDescriptorLength(sd uintptr) (len uint32) {
-	r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
-	len = uint32(r0)
-	return
-}
-
func impersonateSelf(level uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level))
	if r1 == 0 {
		err = errnoErr(e1)
	}
@@ -157,7 +119,7 @@ func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSiz
}

func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
	if r1 == 0 {
		err = errnoErr(e1)
	}

@@ -165,7 +127,7 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS
}

func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
@@ -182,7 +144,7 @@ func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16,
}

func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
+	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)))
	if r1 == 0 {
		err = errnoErr(e1)
	}

@@ -199,7 +161,7 @@ func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *
}

func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
@@ -221,19 +183,19 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err
}

func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
-	r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

-func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
+func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
	var _p0 uint32
	if openAsSelf {
		_p0 = 1
	}
-	r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
@@ -241,14 +203,14 @@ func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool,
}

func revertToSelf() (err error) {
-	r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+	r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

-func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
	var _p0 *byte
	if len(b) > 0 {
		_p0 = &b[0]
@@ -261,14 +223,14 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce
	if processSecurity {
		_p2 = 1
	}
-	r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

-func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
	var _p0 *byte
	if len(b) > 0 {
		_p0 = &b[0]
@@ -281,39 +243,39 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
	if processSecurity {
		_p2 = 1
	}
-	r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

-func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
+func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
+	r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

-func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
+func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
+	r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

-func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
+func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) {
-	r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount))
-	newport = syscall.Handle(r0)
+	newport = windows.Handle(r0)
	if newport == 0 {
		err = errnoErr(e1)
	}
	return
}

-func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
+func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(name)
	if err != nil {
@ -322,96 +284,93 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui
|
||||||
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
|
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
|
||||||
}
|
}
|
||||||
|
|
||||||
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
|
||||||
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
|
r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
|
||||||
handle = syscall.Handle(r0)
|
handle = windows.Handle(r0)
|
||||||
if handle == syscall.InvalidHandle {
|
if handle == windows.InvalidHandle {
|
||||||
err = errnoErr(e1)
|
err = errnoErr(e1)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func getCurrentThread() (h syscall.Handle) {
|
func disconnectNamedPipe(pipe windows.Handle) (err error) {
|
||||||
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
|
r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
|
||||||
h = syscall.Handle(r0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
|
|
||||||
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
|
|
||||||
if r1 == 0 {
|
if r1 == 0 {
|
||||||
err = errnoErr(e1)
|
err = errnoErr(e1)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
func getCurrentThread() (h windows.Handle) {
|
||||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr())
|
||||||
|
h = windows.Handle(r0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
|
||||||
|
r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
|
||||||
if r1 == 0 {
|
if r1 == 0 {
|
||||||
err = errnoErr(e1)
|
err = errnoErr(e1)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||||
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
|
r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
|
||||||
if r1 == 0 {
|
if r1 == 0 {
|
||||||
err = errnoErr(e1)
|
err = errnoErr(e1)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
|
func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
||||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
|
r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout))
|
||||||
ptr = uintptr(r0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func localFree(mem uintptr) {
|
|
||||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
|
|
||||||
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
|
|
||||||
if r1 == 0 {
|
if r1 == 0 {
|
||||||
err = errnoErr(e1)
|
err = errnoErr(e1)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
|
func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) {
|
||||||
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
|
r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags))
|
||||||
|
if r1 == 0 {
|
||||||
|
err = errnoErr(e1)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
|
||||||
|
r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
|
||||||
status = ntStatus(r0)
|
status = ntStatus(r0)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
|
func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
|
||||||
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
|
r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl)))
|
||||||
status = ntStatus(r0)
|
status = ntStatus(r0)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
|
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
|
||||||
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
|
r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved))
|
||||||
status = ntStatus(r0)
|
status = ntStatus(r0)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func rtlNtStatusToDosError(status ntStatus) (winerr error) {
|
func rtlNtStatusToDosError(status ntStatus) (winerr error) {
|
||||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
|
||||||
if r0 != 0 {
|
if r0 != 0 {
|
||||||
winerr = syscall.Errno(r0)
|
winerr = syscall.Errno(r0)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||||
var _p0 uint32
|
var _p0 uint32
|
||||||
if wait {
|
if wait {
|
||||||
_p0 = 1
|
_p0 = 1
|
||||||
}
|
}
|
||||||
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
|
r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
|
||||||
if r1 == 0 {
|
if r1 == 0 {
|
||||||
err = errnoErr(e1)
|
err = errnoErr(e1)
|
||||||
}
|
}
|
||||||
|
|
|
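The bulk of this vendored update is mechanical: every generated wrapper drops the fixed-arity syscall.Syscall/Syscall6/Syscall9/Syscall15 call, which required an explicit argument count plus zero padding, in favor of syscall.SyscallN, and handle and overlapped types move from package syscall to golang.org/x/sys/windows. A minimal sketch of that pattern follows; it is not taken from the vendored files, and the wrapper below only mirrors the generated style for illustration.

//go:build windows

package main

import (
	"fmt"
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	modkernel32    = windows.NewLazySystemDLL("kernel32.dll")
	procCancelIoEx = modkernel32.NewProc("CancelIoEx")
)

// cancelIoEx mirrors the generated wrapper style: SyscallN takes exactly the
// arguments that are passed, so the old argument count (2) and the trailing
// zeros disappear from the call site.
func cancelIoEx(file windows.Handle, o *windows.Overlapped) error {
	r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o)))
	if r1 == 0 {
		return e1 // a real wrapper would route this through errnoErr
	}
	return nil
}

func main() {
	// Calling with an invalid handle just demonstrates the call path and the error return.
	fmt.Println(cancelIoEx(windows.InvalidHandle, nil))
}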
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }

@@ -75,7 +72,7 @@ func _hcsAttachLayerStorageFilter
-	r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsAttachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff

@@ -104,7 +101,7 @@ func _hcsAttachOverlayFilter
-	r0, _, _ := syscall.Syscall(procHcsAttachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsAttachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)))

@@ -128,7 +125,7 @@ func _hcsDestroyLayer
-	r0, _, _ := syscall.Syscall(procHcsDestroyLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsDestroyLayer.Addr(), uintptr(unsafe.Pointer(layerPath)))

@@ -152,7 +149,7 @@ func _hcsDetachLayerStorageFilter
-	r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsDetachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath)))

@@ -181,7 +178,7 @@ func _hcsDetachOverlayFilter
-	r0, _, _ := syscall.Syscall(procHcsDetachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsDetachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)))

@@ -220,7 +217,7 @@ func _hcsExportLayer
-	r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsExportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))

@@ -235,7 +232,7 @@ func hcsFormatWritableLayerVhd
-	r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsFormatWritableLayerVhd.Addr(), uintptr(handle))

@@ -250,7 +247,7 @@ func hcsGetLayerVhdMountPath
-	r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsGetLayerVhdMountPath.Addr(), uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)))

@@ -284,7 +281,7 @@ func _hcsImportLayer
-	r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData)))
+	r0, _, _ := syscall.SyscallN(procHcsImportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData)))

@@ -318,7 +315,7 @@ func _hcsInitializeWritableLayer
-	r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))
+	r0, _, _ := syscall.SyscallN(procHcsInitializeWritableLayer.Addr(), uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))

@@ -347,7 +344,7 @@ func _hcsSetupBaseOSLayer
-	r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options)))
+	r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options)))

@@ -381,7 +378,7 @@ func _hcsSetupBaseOSVolume
-	r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options)))
+	r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSVolume.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options)))
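The wrappers in this block keep the generated result handling shown as context in the first hunk above: when the returned HRESULT carries facility FACILITY_WIN32 (the r0&0x1fff0000 == 0x00070000 test), its low 16 bits are the original Win32 error number, which r0 &= 0xffff recovers. A standalone sketch of that decoding, using a hypothetical helper name purely for illustration:

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

// errnoFromHresult is a hypothetical helper that undoes HRESULT_FROM_WIN32:
// if the facility bits say FACILITY_WIN32 (7), the low 16 bits hold the
// original Win32 error code.
func errnoFromHresult(hr uintptr) syscall.Errno {
	if hr&0x1fff0000 == 0x00070000 {
		hr &= 0xffff
	}
	return syscall.Errno(hr)
}

func main() {
	// 0x80070005 is HRESULT_FROM_WIN32(ERROR_ACCESS_DENIED).
	fmt.Println(uint32(errnoFromHresult(0x80070005))) // prints 5
}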
@@ -14,14 +14,14 @@ import (
 	"golang.org/x/sys/windows"
 )
 
-// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
+// makeOpenFiles calls winio.NewOpenFile for each handle in a slice but closes all the handles
 // if there is an error.
 func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
 	fs := make([]io.ReadWriteCloser, len(hs))
 	for i, h := range hs {
 		if h != syscall.Handle(0) {
 			if err == nil {
-				fs[i], err = winio.MakeOpenFile(h)
+				fs[i], err = winio.NewOpenFile(windows.Handle(h))
 			}
 			if err != nil {
 				syscall.Close(h)
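The only non-generated change in this block is the constructor rename: go-winio's deprecated MakeOpenFile(syscall.Handle) gives way to NewOpenFile, which takes a windows.Handle. A minimal caller-side sketch, assuming the go-winio v0.6.x API vendored here:

//go:build windows

package main

import (
	"io"
	"syscall"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

// wrapHandle shows the conversion the vendored helper now performs:
// syscall.Handle and windows.Handle share the same underlying uintptr,
// so only the static type changes at the call site.
func wrapHandle(h syscall.Handle) (io.ReadWriteCloser, error) {
	return winio.NewOpenFile(windows.Handle(h))
}

func main() {
	_ = wrapHandle // present only to illustrate the call shape
}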
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }

@@ -69,7 +66,7 @@ func __hnsCall
-	r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHNSCall.Addr(), uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)))
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }

@@ -46,6 +43,6 @@ var (
 func coTaskMemFree(buffer unsafe.Pointer) {
-	syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0)
+	syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(buffer))
 	return
 }
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }

@@ -48,7 +45,7 @@ var (
 func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) {
-	r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
+	r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}

@@ -56,7 +53,7 @@ func getSecurityInfo
 func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) {
-	r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)))

@@ -64,7 +61,7 @@ func setEntriesInAcl
 func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) {
-	r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
+	r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl))
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }

@@ -75,7 +72,7 @@ func hcsCloseComputeSystem
-	r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsCloseComputeSystem.Addr(), uintptr(computeSystem))

@@ -90,7 +87,7 @@ func hcsCloseProcess
-	r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsCloseProcess.Addr(), uintptr(process))

@@ -119,7 +116,7 @@ func _hcsCreateComputeSystem
-	r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsCreateComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))

@@ -143,7 +140,7 @@ func _hcsCreateProcess
-	r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsCreateProcess.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)))

@@ -167,7 +164,7 @@ func _hcsEnumerateComputeSystems
-	r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsEnumerateComputeSystems.Addr(), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))

@@ -191,7 +188,7 @@ func _hcsGetComputeSystemProperties
-	r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsGetComputeSystemProperties.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))

@@ -206,7 +203,7 @@ func hcsGetProcessInfo
-	r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsGetProcessInfo.Addr(), uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))

@@ -221,7 +218,7 @@ func hcsGetProcessProperties
-	r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsGetProcessProperties.Addr(), uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))

@@ -245,7 +242,7 @@ func _hcsGetServiceProperties
-	r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsGetServiceProperties.Addr(), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))

@@ -269,7 +266,7 @@ func _hcsModifyComputeSystem
-	r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsModifyComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))

@@ -293,7 +290,7 @@ func _hcsModifyProcess
-	r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsModifyProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))

@@ -317,7 +314,7 @@ func _hcsModifyServiceSettings
-	r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsModifyServiceSettings.Addr(), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))

@@ -341,7 +338,7 @@ func _hcsOpenComputeSystem
-	r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsOpenComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))

@@ -356,7 +353,7 @@ func hcsOpenProcess
-	r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsOpenProcess.Addr(), uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)))

@@ -380,7 +377,7 @@ func _hcsPauseComputeSystem
-	r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsPauseComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))

@@ -395,7 +392,7 @@ func hcsRegisterComputeSystemCallback
-	r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsRegisterComputeSystemCallback.Addr(), uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)))

@@ -410,7 +407,7 @@ func hcsRegisterProcessCallback
-	r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsRegisterProcessCallback.Addr(), uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)))

@@ -434,7 +431,7 @@ func _hcsResumeComputeSystem
-	r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsResumeComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))

@@ -458,7 +455,7 @@ func _hcsSaveComputeSystem
-	r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsSaveComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))

@@ -482,7 +479,7 @@ func _hcsShutdownComputeSystem
-	r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsShutdownComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))

@@ -506,7 +503,7 @@ func _hcsSignalProcess
-	r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsSignalProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))

@@ -530,7 +527,7 @@ func _hcsStartComputeSystem
-	r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsStartComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))

@@ -554,7 +551,7 @@ func _hcsTerminateComputeSystem
-	r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsTerminateComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))

@@ -569,7 +566,7 @@ func hcsTerminateProcess
-	r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsTerminateProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(result)))

@@ -584,7 +581,7 @@ func hcsUnregisterComputeSystemCallback
-	r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsUnregisterComputeSystemCallback.Addr(), uintptr(callbackHandle))

@@ -599,7 +596,7 @@ func hcsUnregisterProcessCallback
-	r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsUnregisterProcessCallback.Addr(), uintptr(callbackHandle))
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }

@@ -77,7 +74,7 @@ func getDiskFreeSpaceEx
 func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)))

@@ -85,7 +82,7 @@ func _getDiskFreeSpaceEx
 func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped))
+	r1, _, e1 := syscall.SyscallN(procAttachVirtualDisk.Addr(), uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped))

@@ -102,7 +99,7 @@ func openVirtualDisk
 func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
-	r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
+	r1, _, e1 := syscall.SyscallN(procOpenVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))

@@ -123,7 +120,7 @@ func _activateLayer
-	r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+	r0, _, _ := syscall.SyscallN(procActivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))

@@ -156,7 +153,7 @@ func _copyLayer
-	r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+	r0, _, _ := syscall.SyscallN(procCopyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)))

@@ -185,7 +182,7 @@ func _createLayer
-	r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))
+	r0, _, _ := syscall.SyscallN(procCreateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))

@@ -213,7 +210,7 @@ func _createSandboxLayer
-	r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0)
+	r0, _, _ := syscall.SyscallN(procCreateSandboxLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)))

@@ -237,7 +234,7 @@ func _deactivateLayer
-	r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+	r0, _, _ := syscall.SyscallN(procDeactivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))

@@ -261,7 +258,7 @@ func _destroyLayer
-	r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+	r0, _, _ := syscall.SyscallN(procDestroyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))

@@ -285,7 +282,7 @@ func _expandSandboxSize
-	r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))
+	r0, _, _ := syscall.SyscallN(procExpandSandboxSize.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))

@@ -318,7 +315,7 @@ func _exportLayer
-	r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+	r0, _, _ := syscall.SyscallN(procExportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)))

@@ -333,7 +330,7 @@ func getBaseImages
-	r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetBaseImages.Addr(), uintptr(unsafe.Pointer(buffer)))

@@ -357,7 +354,7 @@ func _getLayerMountPath
-	r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetLayerMountPath.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)))

@@ -386,7 +383,7 @@ func _grantVmAccess
-	r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0)
+	r0, _, _ := syscall.SyscallN(procGrantVmAccess.Addr(), uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)))

@@ -419,7 +416,7 @@ func _importLayer
-	r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+	r0, _, _ := syscall.SyscallN(procImportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)))

@@ -443,7 +440,7 @@ func _layerExists
-	r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))
+	r0, _, _ := syscall.SyscallN(procLayerExists.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))

@@ -467,7 +464,7 @@ func _nameToGuid
-	r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0)
+	r0, _, _ := syscall.SyscallN(procNameToGuid.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)))

@@ -495,7 +492,7 @@ func _prepareLayer
-	r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procPrepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)))

@@ -519,7 +516,7 @@ func _processBaseImage
-	r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procProcessBaseImage.Addr(), uintptr(unsafe.Pointer(path)))

@@ -543,7 +540,7 @@ func _processUtilityImage
-	r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procProcessUtilityImage.Addr(), uintptr(unsafe.Pointer(path)))

@@ -567,7 +564,7 @@ func _unprepareLayer
-	r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+	r0, _, _ := syscall.SyscallN(procUnprepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -109,7 +106,7 @@ var (
 )
 
 func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) {
-	r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
+	r1, _, e1 := syscall.SyscallN(procLogonUserW.Addr(), uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -121,7 +118,7 @@ func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16,
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procBfSetupFilter.Addr(), 6, uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount))
+	r0, _, _ := syscall.SyscallN(procBfSetupFilter.Addr(), uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -132,7 +129,7 @@ func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16,
 	}
 
 func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) {
-	r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags))
+	r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_PropertyW.Addr(), uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -143,7 +140,7 @@ func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyTyp
 	}
 
 func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) {
-	r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCM_Get_Device_ID_ListA.Addr(), uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -154,7 +151,7 @@ func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags u
 	}
 
 func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) {
-	r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags))
+	r0, _, _ := syscall.SyscallN(procCM_Get_Device_ID_List_SizeA.Addr(), uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -174,7 +171,7 @@ func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr er
 	}
 
 func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) {
-	r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags))
+	r0, _, _ := syscall.SyscallN(procCM_Locate_DevNodeW.Addr(), uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -189,7 +186,7 @@ func CimCloseImage(cimFSHandle FsHandle) (err error) {
 	if err != nil {
 		return
 	}
-	syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0)
+	syscall.SyscallN(procCimCloseImage.Addr(), uintptr(cimFSHandle))
 	return
 }
 
@@ -198,7 +195,7 @@ func CimCloseStream(cimStreamHandle StreamHandle) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procCimCloseStream.Addr(), 1, uintptr(cimStreamHandle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCimCloseStream.Addr(), uintptr(cimStreamHandle))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -213,7 +210,7 @@ func CimCommitImage(cimFSHandle FsHandle) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procCimCommitImage.Addr(), 1, uintptr(cimFSHandle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCimCommitImage.Addr(), uintptr(cimFSHandle))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -237,7 +234,7 @@ func _CimCreateAlternateStream(cimFSHandle FsHandle, path *uint16, size uint64,
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procCimCreateAlternateStream.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCimCreateAlternateStream.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -261,7 +258,7 @@ func _CimCreateFile(cimFSHandle FsHandle, path *uint16, file *CimFsFileMetadata,
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procCimCreateFile.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCimCreateFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -290,7 +287,7 @@ func _CimCreateHardLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16)
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procCimCreateHardLink.Addr(), 3, uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath)))
+	r0, _, _ := syscall.SyscallN(procCimCreateHardLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -314,7 +311,7 @@ func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, ci
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procCimCreateImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCimCreateImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -338,7 +335,7 @@ func _CimDeletePath(cimFSHandle FsHandle, path *uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procCimDeletePath.Addr(), 2, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), 0)
+	r0, _, _ := syscall.SyscallN(procCimDeletePath.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -353,7 +350,7 @@ func CimDismountImage(volumeID *g) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procCimDismountImage.Addr(), 1, uintptr(unsafe.Pointer(volumeID)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCimDismountImage.Addr(), uintptr(unsafe.Pointer(volumeID)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -382,7 +379,7 @@ func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procCimMountImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCimMountImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -397,7 +394,7 @@ func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uin
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procCimWriteStream.Addr(), 3, uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize))
+	r0, _, _ := syscall.SyscallN(procCimWriteStream.Addr(), uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -408,7 +405,7 @@ func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uin
 	}
 
 func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) {
-	r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0)
+	r0, _, _ := syscall.SyscallN(procSetJobCompartmentId.Addr(), uintptr(handle), uintptr(compartmentId))
 	if r0 != 0 {
 		win32Err = syscall.Errno(r0)
 	}
@@ -416,12 +413,12 @@ func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err
 }
 
 func ClosePseudoConsole(hpc windows.Handle) {
-	syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0)
+	syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(hpc))
 	return
 }
 
 func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procCopyFileW.Addr(), 3, uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists))
+	r1, _, e1 := syscall.SyscallN(procCopyFileW.Addr(), uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -429,7 +426,7 @@ func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32
 }
 
 func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) {
-	r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0)
+	r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -440,7 +437,7 @@ func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Han
 }
 
 func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) {
-	r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procCreateRemoteThread.Addr(), uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)))
 	handle = windows.Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -449,13 +446,13 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes,
 }
 
 func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
-	r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber))
 	amount = uint32(r0)
 	return
 }
 
 func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
+	r1, _, e1 := syscall.SyscallN(procIsProcessInJob.Addr(), uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -463,18 +460,18 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result
 }
 
 func LocalAlloc(flags uint32, size int) (ptr uintptr) {
-	r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
+	r0, _, _ := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(size))
 	ptr = uintptr(r0)
 	return
 }
 
 func LocalFree(ptr uintptr) {
-	syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
+	syscall.SyscallN(procLocalFree.Addr(), uintptr(ptr))
 	return
 }
 
 func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName)))
+	r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName)))
 	handle = windows.Handle(r0)
 	if handle == 0 {
 		err = errnoErr(e1)
@@ -483,7 +480,7 @@ func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (h
 }
 
 func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
+	r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -491,7 +488,7 @@ func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobOb
 }
 
 func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) {
-	r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procQueryIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)))
 	ret = uint32(r0)
 	if ret == 0 {
 		err = errnoErr(e1)
@@ -500,7 +497,7 @@ func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName
 }
 
 func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
-	r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0)
+	r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(hPc), uintptr(size))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -511,7 +508,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
 }
 
 func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) {
-	r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath)))
+	r0, _, e1 := syscall.SyscallN(procSearchPathW.Addr(), uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath)))
 	size = uint32(r0)
 	if size == 0 {
 		err = errnoErr(e1)
@@ -520,7 +517,7 @@ func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBuffer
 }
 
 func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0)
+	r0, _, e1 := syscall.SyscallN(procSetIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)))
 	ret = uint32(r0)
 	if ret == 0 {
 		err = errnoErr(e1)
@@ -529,7 +526,7 @@ func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateContro
 }
 
 func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) {
-	r0, _, _ := syscall.Syscall6(procNetLocalGroupAddMembers.Addr(), 5, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries), 0)
+	r0, _, _ := syscall.SyscallN(procNetLocalGroupAddMembers.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries))
 	if r0 != 0 {
 		status = syscall.Errno(r0)
 	}
@@ -537,7 +534,7 @@ func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32
 }
 
 func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) {
-	r0, _, _ := syscall.Syscall6(procNetLocalGroupGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNetLocalGroupGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)))
 	if r0 != 0 {
 		status = syscall.Errno(r0)
 	}
@@ -545,7 +542,7 @@ func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, b
 }
 
 func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) {
-	r0, _, _ := syscall.Syscall6(procNetUserAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNetUserAdd.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)))
 	if r0 != 0 {
 		status = syscall.Errno(r0)
 	}
@@ -553,7 +550,7 @@ func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (
 }
 
 func netUserDel(serverName *uint16, username *uint16) (status error) {
-	r0, _, _ := syscall.Syscall(procNetUserDel.Addr(), 2, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)), 0)
+	r0, _, _ := syscall.SyscallN(procNetUserDel.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)))
 	if r0 != 0 {
 		status = syscall.Errno(r0)
 	}
@@ -561,25 +558,25 @@ func netUserDel(serverName *uint16, username *uint16) (status error) {
 }
 
 func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
-	r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
+	r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength))
 	status = uint32(r0)
 	return
 }
 
 func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) {
-	r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
+	r0, _, _ := syscall.SyscallN(procNtCreateJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
 	status = uint32(r0)
 	return
 }
 
 func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) {
-	r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)))
+	r0, _, _ := syscall.SyscallN(procNtOpenDirectoryObject.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)))
 	status = uint32(r0)
 	return
 }
 
 func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) {
-	r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
+	r0, _, _ := syscall.SyscallN(procNtOpenJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
 	status = uint32(r0)
 	return
 }
@@ -593,31 +590,31 @@ func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleE
 	if restartScan {
 		_p1 = 1
 	}
-	r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNtQueryDirectoryObject.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)))
 	status = uint32(r0)
 	return
 }
 
 func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
-	r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
+	r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)))
 	status = uint32(r0)
 	return
 }
 
 func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
-	r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)))
 	status = uint32(r0)
 	return
 }
 
 func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
-	r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
+	r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class))
 	status = uint32(r0)
 	return
 }
 
 func RtlNtStatusToDosError(status uint32) (winerr error) {
-	r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosError.Addr(), uintptr(status))
 	if r0 != 0 {
 		winerr = syscall.Errno(r0)
 	}
@@ -625,7 +622,7 @@ func RtlNtStatusToDosError(status uint32) (winerr error) {
 }
 
 func ORCloseHive(handle ORHKey) (win32err error) {
-	r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(handle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procORCloseHive.Addr(), uintptr(handle))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -633,7 +630,7 @@ func ORCloseHive(handle ORHKey) (win32err error) {
 }
 
 func ORCloseKey(handle ORHKey) (win32err error) {
-	r0, _, _ := syscall.Syscall(procORCloseKey.Addr(), 1, uintptr(handle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procORCloseKey.Addr(), uintptr(handle))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -641,7 +638,7 @@ func ORCloseKey(handle ORHKey) (win32err error) {
 }
 
 func ORCreateHive(key *ORHKey) (win32err error) {
-	r0, _, _ := syscall.Syscall(procORCreateHive.Addr(), 1, uintptr(unsafe.Pointer(key)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procORCreateHive.Addr(), uintptr(unsafe.Pointer(key)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -658,7 +655,7 @@ func ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, se
 }
 
 func _ORCreateKey(handle ORHKey, subKey *uint16, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) {
-	r0, _, _ := syscall.Syscall9(procORCreateKey.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procORCreateKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -675,7 +672,7 @@ func ORDeleteKey(handle ORHKey, subKey string) (win32err error) {
 }
 
 func _ORDeleteKey(handle ORHKey, subKey *uint16) (win32err error) {
-	r0, _, _ := syscall.Syscall(procORDeleteKey.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(subKey)), 0)
+	r0, _, _ := syscall.SyscallN(procORDeleteKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -697,7 +694,7 @@ func ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, d
 }
 
 func _ORGetValue(handle ORHKey, subKey *uint16, value *uint16, valueType *uint32, data *byte, dataLen *uint32) (win32err error) {
-	r0, _, _ := syscall.Syscall6(procORGetValue.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen)))
+	r0, _, _ := syscall.SyscallN(procORGetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -709,7 +706,7 @@ func ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) {
 	if len(hiveHandles) > 0 {
 		_p0 = &hiveHandles[0]
 	}
-	r0, _, _ := syscall.Syscall(procORMergeHives.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procORMergeHives.Addr(), uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -726,7 +723,7 @@ func OROpenHive(hivePath string, result *ORHKey) (win32err error) {
 }
 
 func _OROpenHive(hivePath *uint16, result *ORHKey) (win32err error) {
-	r0, _, _ := syscall.Syscall(procOROpenHive.Addr(), 2, uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)), 0)
+	r0, _, _ := syscall.SyscallN(procOROpenHive.Addr(), uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -743,7 +740,7 @@ func OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) {
 }
 
 func _OROpenKey(handle ORHKey, subKey *uint16, result *ORHKey) (win32err error) {
-	r0, _, _ := syscall.Syscall(procOROpenKey.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procOROpenKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -760,7 +757,7 @@ func ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVe
 }
 
 func _ORSaveHive(handle ORHKey, hivePath *uint16, osMajorVersion uint32, osMinorVersion uint32) (win32err error) {
-	r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion), 0, 0)
+	r0, _, _ := syscall.SyscallN(procORSaveHive.Addr(), uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -777,7 +774,7 @@ func ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, d
 }
 
 func _ORSetValue(handle ORHKey, valueName *uint16, valueType uint32, data *byte, dataLen uint32) (win32err error) {
-	r0, _, _ := syscall.Syscall6(procORSetValue.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen), 0)
+	r0, _, _ := syscall.SyscallN(procORSetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -46,7 +43,7 @@ var (
 )
 
 func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) {
-	r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0)
+	r0, _, _ := syscall.SyscallN(procSetCurrentThreadCompartmentId.Addr(), uintptr(compartmentId))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -47,13 +47,17 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
 	desc := imgspecv1.Descriptor{
 		Annotations: stream.info.Annotations,
 	}
-	reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
+	// DecryptLayer supposedly returns a digest of the decrypted stream.
+	// In practice, that value is never set in the current implementation.
+	// And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key,
+	// i.e. it doesn’t authenticate the origin of the metadata in any way.
+	reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
 	if err != nil {
 		return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
 	}
 
 	stream.reader = reader
-	stream.info.Digest = decryptedDigest
+	stream.info.Digest = ""
 	stream.info.Size = -1
 	maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
 		return strings.HasPrefix(k, "org.opencontainers.image.enc")
@@ -74,7 +74,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
 	srcType := in.srcMIMEType
 	normalizedSrcType := manifest.NormalizedMIMEType(srcType)
 	if srcType != normalizedSrcType {
-		logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
+		logrus.Debugf("Source manifest MIME type %q, treating it as %q", srcType, normalizedSrcType)
 		srcType = normalizedSrcType
 	}
 
@@ -237,7 +237,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
 		}
 	}
 
-	logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
+	logrus.Debugf("Manifest list has MIME type %q, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
 	if len(prioritizedTypes.list) == 0 {
 		return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
 	}
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"math"
 	"time"
 
 	"github.com/containers/image/v5/internal/private"
@@ -151,12 +152,18 @@ type blobChunkAccessorProxy struct {
 // The specified chunks must be not overlapping and sorted by their offset.
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
 func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	start := time.Now()
 	rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
 	if err == nil {
 		total := int64(0)
 		for _, c := range chunks {
+			// do not update the progress bar if there is a chunk with unknown length.
+			if c.Length == math.MaxUint64 {
+				return rc, errs, err
+			}
 			total += int64(c.Length)
 		}
 		s.bar.EwmaIncrInt64(total, time.Since(start))
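(Aside, not part of the vendored diff: the hunk above introduces an "until EOF" convention for partial blob fetches. A minimal, hypothetical caller-side sketch, assuming only the private.ImageSourceChunk type with the Offset/Length fields used above; variable names are illustrative.)

	// Illustrative only; not taken from the change itself.
	chunks := []private.ImageSourceChunk{
		{Offset: 0, Length: 1024},              // first 1 KiB
		{Offset: 1024, Length: math.MaxUint64}, // the rest of the blob, up to EOF
	}
	readers, errs, err := src.GetBlobAt(ctx, blobInfo, chunks)
	// The proxy above skips the progress-bar update when it sees the MaxUint64 chunk,
	// because the total number of bytes is not known up front.
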
@@ -78,7 +78,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
 			}
 			nt, ok := parsedTag.(reference.NamedTagged)
 			if !ok {
-				return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
+				return nil, fmt.Errorf("Invalid tag %q (%s): does not contain a tag", tag, parsedTag.String())
 			}
 			ref, err := newReference(r.path, nt, -1, r.archive, nil)
 			if err != nil {
@@ -116,7 +116,7 @@ func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) err
 			return fmt.Errorf("parsing docker load progress: %w", err)
 		}
 		if msg.Error != nil {
-			return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
+			return fmt.Errorf("docker engine reported: %q", msg.Error.Message)
 		}
 	}
 	return nil // No error reported = success
@@ -1097,6 +1097,11 @@ func isManifestUnknownError(err error) bool {
 	if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
 		return true
 	}
+	// Harbor v2.10.2
+	if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && strings.Contains(strings.ToLower(e.Message), "not found") {
+		return true
+	}
+
 	// opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
 	// but specifies that the HTTP status must be 404.
 	var unexpected *unexpectedHTTPResponseError
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"mime"
 	"mime/multipart"
 	"net/http"
@@ -260,9 +261,15 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 			}
 			currentOffset += toSkip
 		}
+		var reader io.Reader
+		if c.Length == math.MaxUint64 {
+			reader = body
+		} else {
+			reader = io.LimitReader(body, int64(c.Length))
+		}
 		s := signalCloseReader{
 			closed: make(chan struct{}),
-			stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
+			stream: io.NopCloser(reader),
 			consumeStream: true,
 		}
 		streams <- s
@@ -343,12 +350,24 @@ func parseMediaType(contentType string) (string, map[string]string, error) {
 // The specified chunks must be not overlapping and sorted by their offset.
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
 func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	headers := make(map[string][]string)
 
 	rangeVals := make([]string, 0, len(chunks))
+	lastFound := false
 	for _, c := range chunks {
-		rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
+		if lastFound {
+			return nil, nil, fmt.Errorf("internal error: another chunk requested after an until-EOF chunk")
+		}
+		// If the Length is set to -1, then request anything after the specified offset.
+		if c.Length == math.MaxUint64 {
+			lastFound = true
+			rangeVals = append(rangeVals, fmt.Sprintf("%d-", c.Offset))
+		} else {
+			rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
+		}
 	}
 
 	headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))}
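(Aside, not part of the vendored diff: with the loop above, a final until-EOF chunk is translated into an open-ended HTTP byte range. A small hypothetical illustration of the header that this code would build:)

	// Hypothetical values, for illustration only.
	rangeVals := []string{"0-1023", "1024-"} // second chunk had Length == math.MaxUint64
	header := fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))
	// header == "bytes=0-1023,1024-"; the last range runs to the end of the blob.
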
@@ -231,7 +231,7 @@ func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
 	}
 
 	if !header.FileInfo().Mode().IsRegular() {
-		return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+		return nil, fmt.Errorf("Error reading tar archive component %q: not a regular file", header.Name)
 	}
 	succeeded = true
 	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
@@ -262,7 +262,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *
 func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
 	file, err := r.openTarComponent(path)
 	if err != nil {
-		return nil, fmt.Errorf("loading tar component %s: %w", path, err)
+		return nil, fmt.Errorf("loading tar component %q: %w", path, err)
 	}
 	defer file.Close()
 	bytes, err := iolimits.ReadAtMost(file, limit)
@ -95,10 +95,10 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
|
||||||
}
|
}
|
||||||
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
|
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
|
||||||
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
|
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
|
||||||
return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err)
|
return fmt.Errorf("decoding tar config %q: %w", tarManifest.Config, err)
|
||||||
}
|
}
|
||||||
if parsedConfig.RootFS == nil {
|
if parsedConfig.RootFS == nil {
|
||||||
return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
|
return fmt.Errorf("Invalid image config (rootFS is not set): %q", tarManifest.Config)
|
||||||
}
|
}
|
||||||
|
|
||||||
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
|
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
|
||||||
|
@ -144,7 +144,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
|
||||||
}
|
}
|
||||||
layerPath := path.Clean(tarManifest.Layers[i])
|
layerPath := path.Clean(tarManifest.Layers[i])
|
||||||
if _, ok := unknownLayerSizes[layerPath]; ok {
|
if _, ok := unknownLayerSizes[layerPath]; ok {
|
||||||
return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
|
return nil, fmt.Errorf("Layer tarfile %q used for two different DiffID values", layerPath)
|
||||||
}
|
}
|
||||||
li := &layerInfo{ // A new element in each iteration
|
li := &layerInfo{ // A new element in each iteration
|
||||||
path: layerPath,
|
path: layerPath,
|
||||||
|
@ -179,7 +179,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
|
||||||
// the slower method of checking if it's compressed.
|
// the slower method of checking if it's compressed.
|
||||||
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
|
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err)
|
return nil, fmt.Errorf("auto-decompressing %q to determine its size: %w", layerPath, err)
|
||||||
}
|
}
|
||||||
defer uncompressedStream.Close()
|
defer uncompressedStream.Close()
|
||||||
|
|
||||||
|
@ -187,7 +187,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
|
||||||
if isCompressed {
|
if isCompressed {
|
||||||
uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
|
uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err)
|
return nil, fmt.Errorf("reading %q to find its size: %w", layerPath, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
li.size = uncompressedSize
|
li.size = uncompressedSize
|
||||||
|
|
|
@ -164,7 +164,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
|
||||||
return fmt.Errorf("marshaling layer config: %w", err)
|
return fmt.Errorf("marshaling layer config: %w", err)
|
||||||
}
|
}
|
||||||
delete(layerConfig, "layer_id")
|
delete(layerConfig, "layer_id")
|
||||||
layerID := digest.Canonical.FromBytes(b).Hex()
|
layerID := digest.Canonical.FromBytes(b).Encoded()
|
||||||
layerConfig["id"] = layerID
|
layerConfig["id"] = layerID
|
||||||
|
|
||||||
configBytes, err := json.Marshal(layerConfig)
|
configBytes, err := json.Marshal(layerConfig)
|
||||||
|
@ -309,10 +309,10 @@ func (w *Writer) Close() error {
|
||||||
// NOTE: This is an internal implementation detail, not a format property, and can change
|
// NOTE: This is an internal implementation detail, not a format property, and can change
|
||||||
// any time.
|
// any time.
|
||||||
func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
|
func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
|
||||||
if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
|
if err := configDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
return configDigest.Hex() + ".json", nil
|
return configDigest.Encoded() + ".json", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// physicalLayerPath returns a path we choose for storing a layer with the specified digest
|
// physicalLayerPath returns a path we choose for storing a layer with the specified digest
|
||||||
|
@ -320,15 +320,15 @@ func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
|
||||||
// NOTE: This is an internal implementation detail, not a format property, and can change
|
// NOTE: This is an internal implementation detail, not a format property, and can change
|
||||||
// any time.
|
// any time.
|
||||||
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
|
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
|
||||||
if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
|
if err := layerDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
|
// Note that this can't be e.g. filepath.Join(l.Digest.Encoded(), legacyLayerFileName); due to the way
|
||||||
// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
|
// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
|
||||||
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
|
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
|
||||||
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
|
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
|
||||||
// in the root of the tarball.
|
// in the root of the tarball.
|
||||||
return layerDigest.Hex() + ".tar", nil
|
return layerDigest.Encoded() + ".tar", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type tarFI struct {
|
type tarFI struct {
|
||||||
|
|
|
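The Hex() to Encoded() substitutions that recur throughout this update reflect that go-digest deprecates Digest.Hex in favor of Digest.Encoded, and that both panic on malformed digests, hence the explicit Validate() calls kept alongside them. A minimal sketch of that validate-then-encode pattern (the digest value is illustrative, not taken from the commit):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// safeEncoded mirrors the pattern used above: validate before calling
// Encoded(), because Encoded() panics on a malformed digest.
func safeEncoded(d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil {
		return "", err
	}
	return d.Encoded(), nil
}

func main() {
	d := digest.FromString("example")
	enc, err := safeEncoded(d)
	fmt.Println(enc, err) // hex-encoded sha256 payload, nil

	_, err = safeEncoded(digest.Digest("not-a-digest"))
	fmt.Println(err) // validation error instead of a panic
}
```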
@@ -140,7 +140,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 
 	if config.DefaultDocker != nil {
 		if mergedConfig.DefaultDocker != nil {
-			return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+			return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in %q and %q`,
 				dockerDefaultMergedFrom, configPath)
 		}
 		mergedConfig.DefaultDocker = config.DefaultDocker
@@ -149,7 +149,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 
 	for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
 		if _, ok := mergedConfig.Docker[nsName]; ok {
-			return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+			return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace %q defined both in %q and %q`,
 				nsName, nsMergedFrom[nsName], configPath)
 		}
 		mergedConfig.Docker[nsName] = nsConfig
@@ -288,10 +288,10 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
 // base is not nil from the caller
 // NOTE: Keep this in sync with docs/signature-protocols.md!
 func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
-	if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+	if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
 		return nil, err
 	}
 	sigURL := *base
-	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
 	return &sigURL, nil
 }
@@ -366,7 +366,7 @@ func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string)
 	if err := blobDigest.Validate(); err != nil {
 		return "", err
 	}
-	parts := append([]string{blobDigest.Hex()}, others...)
+	parts := append([]string{blobDigest.Encoded()}, others...)
 	v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
 	return hex.EncodeToString(v1IDHash[:]), nil
 }
@@ -76,7 +76,7 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src
 	case imgspecv1.MediaTypeImageIndex:
 		return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
 	default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
-		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+		return nil, fmt.Errorf("Unimplemented manifest MIME type %q", mt)
 	}
 }
vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go (generated, vendored)
@@ -39,6 +39,8 @@ func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
 // The specified chunks must be not overlapping and sorted by their offset.
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
 func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
 }
@@ -164,7 +164,7 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.
 			}
 		}
 	}
-	return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+	return "", fmt.Errorf("no image found in manifest list for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
 }
 
 // Serialize returns the list in a blob format.
@@ -129,5 +129,5 @@ func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
 	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
 		return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
 	}
-	return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
+	return nil, fmt.Errorf("Unimplemented manifest list MIME type %q (normalized as %q)", manifestMIMEType, normalized)
 }
@@ -260,7 +260,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
 	if bestMatch != nil {
 		return bestMatch.digest, nil
 	}
-	return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+	return "", fmt.Errorf("no image found in image index for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
 }
 
 func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go (generated, vendored)
@@ -64,8 +64,8 @@ func getCPUInfo(pattern string) (info string, err error) {
 	return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
 }
 
-func getCPUVariantWindows(arch string) string {
-	// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
+func getCPUVariantDarwinWindows(arch string) string {
+	// Darwin and Windows only support v7 for ARM32 and v8 for ARM64 and so we can use
 	// runtime.GOARCH to determine the variants
 	var variant string
 	switch arch {
@@ -133,8 +133,8 @@ func getCPUVariantArm() string {
 }
 
 func getCPUVariant(os string, arch string) string {
-	if os == "windows" {
-		return getCPUVariantWindows(arch)
+	if os == "darwin" || os == "windows" {
+		return getCPUVariantDarwinWindows(arch)
 	}
 	if arch == "arm" || arch == "arm64" {
 		return getCPUVariantArm()
@@ -143,7 +143,11 @@ type ReusedBlob struct {
 // ImageSourceChunk is a portion of a blob.
 // This API is experimental and can be changed without bumping the major version number.
 type ImageSourceChunk struct {
+	// Offset specifies the starting position of the chunk within the source blob.
 	Offset uint64
+
+	// Length specifies the size of the chunk. If it is set to math.MaxUint64,
+	// then it refers to all the data from Offset to the end of the blob.
 	Length uint64
 }
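To make the new ImageSourceChunk semantics concrete, here is a small self-contained sketch of the consumer side of the GetBlobAt contract documented in these hunks: chunks are requested in offset order, the last one may use math.MaxUint64 to mean "to EOF", and each returned reader must be fully consumed in order. The fakeGetBlobAt helper and the local chunk type are stand-ins invented for illustration; they are not part of the vendored API.

```go
package main

import (
	"fmt"
	"io"
	"math"
	"strings"
)

// chunk mirrors ImageSourceChunk: Offset plus Length, with math.MaxUint64 meaning "to EOF".
type chunk struct{ Offset, Length uint64 }

// fakeGetBlobAt is a stand-in for a GetBlobAt implementation: it serves each
// requested chunk of blob over the same (streams, errs) channel contract.
func fakeGetBlobAt(blob string, chunks []chunk) (chan io.ReadCloser, chan error) {
	streams := make(chan io.ReadCloser)
	errs := make(chan error)
	go func() {
		defer close(streams)
		defer close(errs)
		for _, c := range chunks {
			end := uint64(len(blob))
			if c.Length != math.MaxUint64 && c.Offset+c.Length < end {
				end = c.Offset + c.Length
			}
			streams <- io.NopCloser(strings.NewReader(blob[c.Offset:end]))
		}
	}()
	return streams, errs
}

func main() {
	chunks := []chunk{
		{Offset: 0, Length: 5},
		{Offset: 10, Length: math.MaxUint64}, // last chunk: the rest of the blob
	}
	streams, errs := fakeGetBlobAt("hello, chunked blob", chunks)
	// Per the contract, each reader is fully consumed, in the order returned.
	for r := range streams {
		data, _ := io.ReadAll(r)
		r.Close()
		fmt.Printf("%q\n", data)
	}
	if err := <-errs; err != nil {
		fmt.Println("error:", err)
	}
}
```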
@@ -154,6 +158,8 @@ type BlobChunkAccessor interface {
 	// The specified chunks must be not overlapping and sorted by their offset.
 	// The readers must be fully consumed, in the order they are returned, before blocking
 	// to read the next chunk.
+	// If the Length for the last chunk is set to math.MaxUint64, then it
+	// fully fetches the remaining data from the offset to the end of the blob.
 	GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
 }
@@ -67,15 +67,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
 			return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
 		}
 		if name != mtsUncompressed {
-			return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
+			return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %q", name, mimeType)}
 		}
 		// We can't very well say “the idea of no compression is unknown”
 		return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
 	}
 	if algorithm != nil {
-		return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
+		return "", fmt.Errorf("unsupported MIME type for compression: %q", mimeType)
 	}
-	return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
+	return "", fmt.Errorf("unsupported MIME type for decompression: %q", mimeType)
 }
 
 // updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
@@ -221,7 +221,7 @@ func (m *Schema1) fixManifestLayers() error {
 		m.History = slices.Delete(m.History, i, i+1)
 		m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
 	} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
-		return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
+		return fmt.Errorf("Invalid parent ID. Expected %v, got %q", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
 	}
 	}
 	return nil
@@ -342,5 +342,5 @@ func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	return digest.FromBytes(image).Hex(), nil
+	return digest.FromBytes(image).Encoded(), nil
 }
@@ -295,7 +295,7 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
 	if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
 		return "", err
 	}
-	return m.ConfigDescriptor.Digest.Hex(), nil
+	return m.ConfigDescriptor.Digest.Encoded(), nil
 }
 
 // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
@@ -166,5 +166,5 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
 		return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
 	}
 	// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
-	return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
+	return nil, fmt.Errorf("Unimplemented manifest MIME type %q (normalized as %q)", mt, nmt)
 }
@@ -167,7 +167,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 // an error if the mediatype does not support encryption
 func getEncryptedMediaType(mediatype string) (string, error) {
 	if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
-		return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
+		return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
 	}
 	unsuffixedMediatype := strings.Split(mediatype, "+")[0]
 	switch unsuffixedMediatype {
@@ -176,7 +176,7 @@ func getEncryptedMediaType(mediatype string) (string, error) {
 		return mediatype + "+encrypted", nil
 	}
 
-	return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype)
+	return "", fmt.Errorf("unsupported mediaType to encrypt: %q", mediatype)
 }
 
 // getDecryptedMediaType will return the mediatype to its encrypted counterpart and return
@@ -184,7 +184,7 @@ func getEncryptedMediaType(mediatype string) (string, error) {
 func getDecryptedMediaType(mediatype string) (string, error) {
 	res, ok := strings.CutSuffix(mediatype, "+encrypted")
 	if !ok {
-		return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
+		return "", fmt.Errorf("unsupported mediaType to decrypt: %q", mediatype)
 	}
 
 	return res, nil
@@ -260,7 +260,7 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
 	if err := m.Config.Digest.Validate(); err != nil {
 		return "", err
 	}
-	return m.Config.Digest.Hex(), nil
+	return m.Config.Digest.Encoded(), nil
 }
 
 // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
@@ -149,6 +149,8 @@ func (s *ociArchiveImageSource) SupportsGetBlobAt() bool {
 // The specified chunks must be not overlapping and sorted by their offset.
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
 func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	return s.unpackedSrc.GetBlobAt(ctx, info, chunks)
 }
@@ -182,19 +182,19 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
 		hasSupportedURL = true
 		req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
 		if err != nil {
-			errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
+			errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
 			continue
 		}
 
 		resp, err := s.client.Do(req)
 		if err != nil {
-			errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
+			errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
 			continue
 		}
 
 		if resp.StatusCode != http.StatusOK {
 			resp.Body.Close()
-			errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap)
+			errWrap = fmt.Errorf("fetching %q failed, response code not 200: %w", u, errWrap)
 			continue
 		}
 
@@ -256,5 +256,5 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st
 	} else {
 		blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
 	}
-	return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
+	return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil
 }
@@ -553,7 +553,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
 			continue
 		}
 		if err != nil {
-			errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err))
+			errlist = append(errlist, fmt.Errorf("loading config file %q: %w", filename, err))
 			continue
 		}
 
@@ -152,7 +152,7 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
 func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
 	_, repo, gotRepo := strings.Cut(ref, "/")
 	if !gotRepo {
-		return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
+		return "", fmt.Errorf("Invalid format of docker reference %q: missing '/'", ref)
 	}
 	return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
 }
@@ -164,7 +164,7 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
 		return private.UploadedBlob{}, err
 	}
 
-	hash := blobDigest.Hex()
+	hash := blobDigest.Encoded()
 	d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
 	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
 }
@@ -282,8 +282,8 @@ func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest,
 func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
 	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
 
-	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
-	destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
+	destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root")
 	if err := ensureDirectoryExists(destinationPath); err != nil {
 		return err
 	}
@@ -323,7 +323,7 @@ func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle,
 }
 
 func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
-	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
 	destinationPath := filepath.Dir(blob.BlobPath)
 
 	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
@@ -348,10 +348,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		d.repo = repo
 	}
 
-	if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
+	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
 		return false, private.ReusedBlob{}, err
 	}
-	branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+	branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded())
 
 	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
 	if err != nil || !found {
@@ -479,7 +479,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
 	if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
 		return err
 	}
-	hash := layer.Digest.Hex()
+	hash := layer.Digest.Encoded()
 	if err = checkLayer(hash); err != nil {
 		return err
 	}
@@ -488,7 +488,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
 	if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
 		return err
 	}
-	hash := layer.BlobSum.Hex()
+	hash := layer.BlobSum.Encoded()
 	if err = checkLayer(hash); err != nil {
 		return err
 	}
@@ -289,7 +289,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
 		return nil, -1, err
 	}
-	blob := info.Digest.Hex()
+	blob := info.Digest.Encoded()
 
 	// Ensure s.compressed is initialized. It is build by LayerInfosForCopy.
 	if s.compressed == nil {
@@ -301,7 +301,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	}
 	compressedBlob, isCompressed := s.compressed[info.Digest]
 	if isCompressed {
-		blob = compressedBlob.Hex()
+		blob = compressedBlob.Encoded()
 	}
 	branch := fmt.Sprintf("ociimage/%s", blob)
 
@@ -424,7 +424,7 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
 	layerBlobs := man.LayerInfos()
 
 	for _, layerBlob := range layerBlobs {
-		branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+		branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded())
 		found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
 		if err != nil || !found {
 			return nil, err
@@ -439,7 +439,10 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
 		if err != nil {
 			return nil, err
 		}
-		uncompressedDigest := digest.Digest(uncompressedDigestStr)
+		uncompressedDigest, err := digest.Parse(uncompressedDigestStr)
+		if err != nil {
+			return nil, err
+		}
 		blobInfo := types.BlobInfo{
 			Digest: uncompressedDigest,
 			Size:   uncompressedSize,
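Several hunks in this update, like the one above, replace unchecked digest.Digest(...) string conversions with digest.Parse, which rejects malformed input up front instead of relying on every later caller to validate. A small sketch of the difference, with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Unchecked conversion: any string becomes a digest.Digest, valid or not.
	bad := digest.Digest("not-a-real-digest")
	fmt.Println(bad.Validate()) // an error, but only if someone remembers to check

	// digest.Parse validates immediately and returns an error for malformed input.
	if _, err := digest.Parse("not-a-real-digest"); err != nil {
		fmt.Println("rejected:", err)
	}

	// A well-formed digest round-trips cleanly.
	good := digest.FromString("example")
	parsed, err := digest.Parse(good.String())
	fmt.Println(parsed == good, err) // true <nil>
}
```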
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"math"
 	"os"
 	"sync"
 
@@ -116,6 +117,63 @@ func (s *blobCacheSource) GetSignaturesWithFormat(ctx context.Context, instanceD
 	return s.source.GetSignaturesWithFormat(ctx, instanceDigest)
 }
 
+// layerInfoForCopy returns a possibly-updated version of info for LayerInfosForCopy
+func (s *blobCacheSource) layerInfoForCopy(info types.BlobInfo) (types.BlobInfo, error) {
+	var replaceDigestBytes []byte
+	blobFile, err := s.reference.blobPath(info.Digest, false)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	switch s.reference.compress {
+	case types.Compress:
+		replaceDigestBytes, err = os.ReadFile(blobFile + compressedNote)
+	case types.Decompress:
+		replaceDigestBytes, err = os.ReadFile(blobFile + decompressedNote)
+	}
+	if err != nil {
+		return info, nil
+	}
+	replaceDigest, err := digest.Parse(string(replaceDigestBytes))
+	if err != nil {
+		return info, nil
+	}
+	alternate, err := s.reference.blobPath(replaceDigest, false)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	fileInfo, err := os.Stat(alternate)
+	if err != nil {
+		return info, nil
+	}
+
+	switch info.MediaType {
+	case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
+		switch s.reference.compress {
+		case types.Compress:
+			info.MediaType = v1.MediaTypeImageLayerGzip
+			info.CompressionAlgorithm = &compression.Gzip
+		case types.Decompress: // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
+			info.MediaType = v1.MediaTypeImageLayer
+			info.CompressionAlgorithm = nil
+		}
+	case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType:
+		switch s.reference.compress {
+		case types.Compress:
+			info.MediaType = manifest.DockerV2Schema2LayerMediaType
+			info.CompressionAlgorithm = &compression.Gzip
+		case types.Decompress:
+			// nope, not going to suggest anything, it's not allowed by the spec
+			return info, nil
+		}
+	}
+	logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", replaceDigest.String(), info.MediaType, s.reference.compress, info.Digest.String())
+	info.CompressionOperation = s.reference.compress
+	info.Digest = replaceDigest
+	info.Size = fileInfo.Size()
+	logrus.Debugf("info = %#v", info)
+	return info, nil
+}
+
 func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
 	signatures, err := s.source.GetSignaturesWithFormat(ctx, instanceDigest)
 	if err != nil {
@@ -138,55 +196,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
 	if canReplaceBlobs && s.reference.compress != types.PreserveOriginal {
 		replacedInfos := make([]types.BlobInfo, 0, len(infos))
 		for _, info := range infos {
-			var replaceDigest []byte
-			blobFile, err := s.reference.blobPath(info.Digest, false)
+			info, err = s.layerInfoForCopy(info)
 			if err != nil {
 				return nil, err
 			}
-			var alternate string
-			switch s.reference.compress {
-			case types.Compress:
-				alternate = blobFile + compressedNote
-				replaceDigest, err = os.ReadFile(alternate)
-			case types.Decompress:
-				alternate = blobFile + decompressedNote
-				replaceDigest, err = os.ReadFile(alternate)
-			}
-			if err == nil && digest.Digest(replaceDigest).Validate() == nil {
-				alternate, err = s.reference.blobPath(digest.Digest(replaceDigest), false)
-				if err != nil {
-					return nil, err
-				}
-				fileInfo, err := os.Stat(alternate)
-				if err == nil {
-					switch info.MediaType {
-					case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
-						switch s.reference.compress {
-						case types.Compress:
-							info.MediaType = v1.MediaTypeImageLayerGzip
-							info.CompressionAlgorithm = &compression.Gzip
-						case types.Decompress: // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
-							info.MediaType = v1.MediaTypeImageLayer
-							info.CompressionAlgorithm = nil
-						}
-					case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType:
-						switch s.reference.compress {
-						case types.Compress:
-							info.MediaType = manifest.DockerV2Schema2LayerMediaType
-							info.CompressionAlgorithm = &compression.Gzip
-						case types.Decompress:
-							// nope, not going to suggest anything, it's not allowed by the spec
-							replacedInfos = append(replacedInfos, info)
-							continue
-						}
-					}
-					logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String())
-					info.CompressionOperation = s.reference.compress
-					info.Digest = digest.Digest(replaceDigest)
-					info.Size = fileInfo.Size()
-					logrus.Debugf("info = %#v", info)
-				}
-			}
 			replacedInfos = append(replacedInfos, info)
 		}
 		infos = replacedInfos
@@ -214,9 +227,15 @@ func streamChunksFromFile(streams chan io.ReadCloser, errs chan error, file io.R
 			errs <- err
 			break
 		}
+		var stream io.Reader
+		if c.Length != math.MaxUint64 {
+			stream = io.LimitReader(file, int64(c.Length))
+		} else {
+			stream = file
+		}
 		s := signalCloseReader{
 			closed: make(chan struct{}),
-			stream: io.LimitReader(file, int64(c.Length)),
+			stream: stream,
 		}
 		streams <- s
@@ -244,6 +263,8 @@ func (s signalCloseReader) Close() error {
 // The specified chunks must be not overlapping and sorted by their offset.
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
 func (s *blobCacheSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	blobPath, _, _, err := s.reference.findBlob(info)
 	if err != nil {
@@ -111,7 +111,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
 		History: []imgspecv1.History{
 			{
 				Created:   &created,
-				CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
+				CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Encoded(), os.PathSeparator),
 				Comment:   "imported from SIF, uuid: " + sifImg.ID(),
 			},
 			{
@@ -76,10 +76,10 @@ func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unver
 		validateSignedDockerReference: func(signedDockerReference string) error {
 			signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
 			if err != nil {
-				return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference))
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %q in signature", signedDockerReference))
 			}
 			if signedRef.String() != expectedRef.String() {
-				return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s",
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q",
 					signedDockerReference, expectedDockerReference))
 			}
 			return nil
@@ -178,7 +178,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
 
 	// == Validate the OIDC subject
 	if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
-		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
			f.subjectEmail,
			untrustedCertificate.EmailAddresses))
 	}
@@ -31,7 +31,7 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
 		return JSONFormatError(err.Error())
 	}
 	if t != json.Delim('{') {
-		return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+		return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t))
 	}
 	for {
 		t, err := dec.Token()
@@ -45,16 +45,16 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
 		key, ok := t.(string)
 		if !ok {
 			// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
-			return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+			return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t))
 		}
 		if seenKeys.Contains(key) {
-			return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+			return JSONFormatError(fmt.Sprintf("Duplicate key %q", key))
 		}
 		seenKeys.Add(key)
 
 		valuePtr := fieldResolver(key)
 		if valuePtr == nil {
-			return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+			return JSONFormatError(fmt.Sprintf("Unknown key %q", key))
 		}
 		// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
 		if err := dec.Decode(valuePtr); err != nil {
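Many of the message changes in this release, like the ones above, swap hand-quoted `"%s"` (and `%v`) verbs for `%q`, which also escapes quotes and control characters inside the value so a hostile or malformed string cannot blur the message boundaries. A tiny sketch of the difference:

```go
package main

import "fmt"

func main() {
	key := `weird"key` + "\n"

	// Hand-quoted %s leaves embedded quotes and newlines unescaped.
	fmt.Println(fmt.Sprintf("Duplicate key \"%s\"", key))

	// %q produces a single, unambiguous Go-quoted string.
	fmt.Println(fmt.Sprintf("Duplicate key %q", key))
}
```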
@@ -83,7 +83,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
 	}
 	for key := range exactFields {
 		if !seenKeys.Contains(key) {
-			return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+			return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
 		}
 	}
 	return nil
@@ -150,7 +150,11 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
 	}); err != nil {
 		return err
 	}
-	s.untrustedDockerManifestDigest = digest.Digest(digestString)
+	digestValue, err := digest.Parse(digestString)
+	if err != nil {
+		return NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
+	}
+	s.untrustedDockerManifestDigest = digestValue
 
 	return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
 		"docker-reference": &s.untrustedDockerReference,
@@ -247,7 +247,7 @@ func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
 	case prTypeSigstoreSigned:
 		res = &prSigstoreSigned{}
 	default:
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type %q", typeField.Type))
 	}
 	if err := json.Unmarshal(data, &res); err != nil {
 		return nil, err
@@ -279,7 +279,7 @@ func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
 	}
 
 	if tmp.Type != prTypeInsecureAcceptAnything {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
 	}
 	*pr = *newPRInsecureAcceptAnything()
 	return nil
@@ -309,7 +309,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
 	}
 
 	if tmp.Type != prTypeReject {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
 	}
 	*pr = *newPRReject()
 	return nil
@@ -318,7 +318,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
 // newPRSignedBy returns a new prSignedBy if parameters are valid.
 func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
 	if !keyType.IsValid() {
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType %q", keyType))
 	}
 	keySources := 0
 	if keyPath != "" {
@@ -410,7 +410,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
 	}
 
 	if tmp.Type != prTypeSignedBy {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
 	}
 	if signedIdentity == nil {
 		tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
@@ -466,7 +466,7 @@ func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
 		return err
 	}
 	if !sbKeyType(s).IsValid() {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
+		return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value %q", s))
 	}
 	*kt = sbKeyType(s)
 	return nil
@@ -504,7 +504,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
 	}
 
 	if tmp.Type != prTypeSignedBaseLayer {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
 	}
 	bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
 	if err != nil {
@@ -540,7 +540,7 @@ func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error)
 	case prmTypeRemapIdentity:
 		res = &prmRemapIdentity{}
 	default:
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type %q", typeField.Type))
 	}
 	if err := json.Unmarshal(data, &res); err != nil {
 		return nil, err
@@ -572,7 +572,7 @@ func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
 	}
 
 	if tmp.Type != prmTypeMatchExact {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
 	}
 	*prm = *newPRMMatchExact()
 	return nil
@@ -602,7 +602,7 @@ func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
 	}
 
 	if tmp.Type != prmTypeMatchRepoDigestOrExact {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
 	}
 	*prm = *newPRMMatchRepoDigestOrExact()
 	return nil
@@ -632,7 +632,7 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
 	}
 
 	if tmp.Type != prmTypeMatchRepository {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
 	}
 	*prm = *newPRMMatchRepository()
 	return nil
@@ -642,10 +642,10 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
 func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
 	ref, err := reference.ParseNormalizedNamed(dockerReference)
 	if err != nil {
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %q: %s", dockerReference, err.Error()))
 	}
 	if reference.IsNameOnly(ref) {
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %q contains neither a tag nor digest", dockerReference))
 	}
 	return &prmExactReference{
 		prmCommon: prmCommon{Type: prmTypeExactReference},
@@ -673,7 +673,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
 	}
 
 	if tmp.Type != prmTypeExactReference {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
 	}
 
 	res, err := newPRMExactReference(tmp.DockerReference)
|
res, err := newPRMExactReference(tmp.DockerReference)
|
||||||
|
@ -687,7 +687,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
|
||||||
// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
|
// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
|
||||||
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
|
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
|
||||||
if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
|
if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
|
||||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
|
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %q: %s", dockerRepository, err.Error()))
|
||||||
}
|
}
|
||||||
return &prmExactRepository{
|
return &prmExactRepository{
|
||||||
prmCommon: prmCommon{Type: prmTypeExactRepository},
|
prmCommon: prmCommon{Type: prmTypeExactRepository},
|
||||||
|
@ -715,7 +715,7 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if tmp.Type != prmTypeExactRepository {
|
if tmp.Type != prmTypeExactRepository {
|
||||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := newPRMExactRepository(tmp.DockerRepository)
|
res, err := newPRMExactRepository(tmp.DockerRepository)
|
||||||
|
@ -788,7 +788,7 @@ func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if tmp.Type != prmTypeRemapIdentity {
|
if tmp.Type != prmTypeRemapIdentity {
|
||||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)
|
res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)
|
||||||
|
|
|
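Note on the change repeated throughout the hunks above: hand-escaped quoting with \"%s\" is replaced by the %q formatting verb, which quotes and escapes the interpolated value itself. A minimal, self-contained illustration (the sample value is made up and not part of this diff):

package main

import "fmt"

func main() {
	v := `latest"or"not` // a value that itself contains quotes
	fmt.Printf("old style: \"%s\"\n", v) // old style: "latest"or"not" - inner quotes stay unescaped
	fmt.Printf("new style: %q\n", v)     // new style: "latest\"or\"not" - %q escapes them
}
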
@@ -176,7 +176,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
 }
 
 if tmp.Type != prTypeSigstoreSigned {
-return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
 }
 if signedIdentity == nil {
 tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()

@@ -97,7 +97,7 @@ const (
 // changeState changes pc.state, or fails if the state is unexpected
 func (pc *PolicyContext) changeState(expected, new policyContextState) error {
 if pc.state != expected {
-return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+return fmt.Errorf(`Invalid PolicyContext state, expected %q, found %q`, expected, pc.state)
 }
 pc.state = new
 return nil
@@ -140,21 +140,21 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
 // Look for a full match.
 identity := ref.PolicyConfigurationIdentity()
 if req, ok := transportScopes[identity]; ok {
-logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+logrus.Debugf(` Using transport %q policy section %q`, transportName, identity)
 return req
 }
 
 // Look for a match of the possible parent namespaces.
 for _, name := range ref.PolicyConfigurationNamespaces() {
 if req, ok := transportScopes[name]; ok {
-logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+logrus.Debugf(` Using transport %q specific policy section %q`, transportName, name)
 return req
 }
 }
 
 // Look for a default match for the transport.
 if req, ok := transportScopes[""]; ok {
-logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+logrus.Debugf(` Using transport %q policy section ""`, transportName)
 return req
 }
 }

@@ -20,10 +20,10 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
 case SBKeyTypeGPGKeys:
 case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
 // FIXME? Reject this at policy parsing time already?
-return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value %q`, string(pr.KeyType))
 default:
 // This should never happen, newPRSignedBy ensures KeyType.IsValid()
-return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
+return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value %q`, string(pr.KeyType))
 }
 
 // FIXME: move this to per-context initialization
@@ -77,7 +77,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
 },
 validateSignedDockerReference: func(ref string) error {
 if !pr.SignedIdentity.matchesDockerReference(image, ref) {
-return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
 }
 return nil
 },
@@ -123,7 +123,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.U
 // Huh?! This should not happen at all; treat it as any other invalid value.
 fallthrough
 default:
-reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res))
 }
 rejections = append(rejections, reason)
 }

@@ -194,7 +194,7 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
 signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
 ValidateSignedDockerReference: func(ref string) error {
 if !pr.SignedIdentity.matchesDockerReference(image, ref) {
-return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
 }
 return nil
 },
@@ -253,7 +253,7 @@ func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image pri
 // Huh?! This should not happen at all; treat it as any other invalid value.
 fallthrough
 default:
-reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res))
 }
 rejections = append(rejections, reason)
 }

@@ -136,7 +136,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc
 newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
 newParsedRef, err := reference.ParseNamed(newNamedRef)
 if err != nil {
-return nil, fmt.Errorf(`error rewriting reference from "%s" to "%s": %v`, refString, newNamedRef, err)
+return nil, fmt.Errorf(`error rewriting reference from %q to %q: %v`, refString, newNamedRef, err)
 }
 return newParsedRef, nil
 }

@@ -173,7 +173,11 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 }); err != nil {
 return err
 }
-s.untrustedDockerManifestDigest = digest.Digest(digestString)
+digestValue, err := digest.Parse(digestString)
+if err != nil {
+return internal.NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
+}
+s.untrustedDockerManifestDigest = digestValue
 
 return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
 "docker-reference": &s.untrustedDockerReference,

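The hunk above stops storing the docker-manifest-digest value with a plain type conversion and instead parses it, so malformed digests are rejected while unmarshalling. A rough sketch of the difference using the opencontainers go-digest package that this vendored code already imports (the sample string is invented):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	s := "sha256:not-really-hex"
	blind := digest.Digest(s)      // plain conversion: accepts any string as-is
	parsed, err := digest.Parse(s) // Parse checks the algorithm and the hex payload
	fmt.Println(blind, parsed, err) // err is non-nil here and parsed is empty
}
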
@@ -59,7 +59,7 @@ type storageImageDestination struct {
 nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
 manifest []byte // Manifest contents, temporary
 manifestDigest digest.Digest // Valid if len(manifest) != 0
-untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs, valid if not nil
+untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
 signatures []byte // Signature contents, temporary
 signatureses map[digest.Digest][]byte // Instance signature contents, temporary
 metadata storageImageMetadata // Metadata contents being built
@@ -94,11 +94,11 @@ type storageImageDestinationLockProtected struct {
 blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
 indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest
 
-// Layer data: Before commitLayer is called, either at least one of (diffOutputs, blobAdditionalLayer, filenames)
+// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
 // should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
 // They are looked up in the order they are mentioned above.
 diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
-blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
+indexToAdditionalLayer map[int]storage.AdditionalLayer // Mapping from layer index to their corresponding additional layer
 // Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set.
 filenames map[digest.Digest]string
 // Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
@@ -145,13 +145,13 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
 },
 indexToStorageID: make(map[int]string),
 lockProtected: storageImageDestinationLockProtected{
 indexToAddedLayerInfo: make(map[int]addedLayerInfo),
 blobDiffIDs: make(map[digest.Digest]digest.Digest),
 indexToTOCDigest: make(map[int]digest.Digest),
 diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
-blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
+indexToAdditionalLayer: make(map[int]storage.AdditionalLayer),
 filenames: make(map[digest.Digest]string),
 fileSizes: make(map[digest.Digest]int64),
 },
 }
 dest.Compat = impl.AddCompat(dest)
@@ -167,13 +167,11 @@ func (s *storageImageDestination) Reference() types.ImageReference {
 // Close cleans up the temporary directory and additional layer store handlers.
 func (s *storageImageDestination) Close() error {
 // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
-for _, al := range s.lockProtected.blobAdditionalLayer {
+for _, al := range s.lockProtected.indexToAdditionalLayer {
 al.Release()
 }
 for _, v := range s.lockProtected.diffOutputs {
-if v.Target != "" {
-_ = s.imageRef.transport.store.CleanupStagedLayer(v)
-}
+_ = s.imageRef.transport.store.CleanupStagedLayer(v)
 }
 return os.RemoveAll(s.directory)
 }
@@ -310,6 +308,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 if err != nil {
 return private.UploadedBlob{}, err
 }
+succeeded := false
+defer func() {
+if !succeeded {
+_ = s.imageRef.transport.store.CleanupStagedLayer(out)
+}
+}()
 
 if out.TOCDigest == "" && out.UncompressedDigest == "" {
 return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set")
@@ -332,6 +336,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 s.lockProtected.diffOutputs[options.LayerIndex] = out
 s.lock.Unlock()
 
+succeeded = true
 return private.UploadedBlob{
 Digest: blobDigest,
 Size: srcInfo.Size,
@@ -377,14 +382,24 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
 s.lock.Lock()
 defer s.lock.Unlock()
 
-if options.SrcRef != nil {
+if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
 // Check if we have the layer in the underlying additional layer store.
-aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobDigest, options.SrcRef.String())
+aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
 if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
 return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
 } else if err == nil {
-s.lockProtected.blobDiffIDs[blobDigest] = aLayer.UncompressedDigest()
-s.lockProtected.blobAdditionalLayer[blobDigest] = aLayer
+alsTOCDigest := aLayer.TOCDigest()
+if alsTOCDigest != options.TOCDigest {
+// FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
+// probably go on reading the layer from other sources.
+//
+// Currently it should not be possible for alsTOCDigest to be set and not the expected value, but there’s
+// not that much benefit to checking for equality — we trust the FUSE server to validate the digest either way.
+return false, private.ReusedBlob{}, fmt.Errorf("additional layer for TOCDigest %q reports unexpected TOCDigest %q",
+options.TOCDigest, alsTOCDigest)
+}
+s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
+s.lockProtected.indexToAdditionalLayer[*options.LayerIndex] = aLayer
 return true, private.ReusedBlob{
 Digest: blobDigest,
 Size: aLayer.CompressedSize(),
@@ -564,7 +579,7 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
 }
 // ordinaryImageID is a digest of a config, which is a JSON value.
 // To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
-tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex()
+tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
 logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
 return tocImageID
 }
@@ -651,11 +666,11 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
 defer s.lock.Unlock()
 
 if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
-return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
+return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
 }
 
 if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
-return d.Hex(), true // This looks like chain IDs, and it uses the traditional value.
+return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
 }
 return "", false
 }
@@ -731,7 +746,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
 
 id := layerIDComponent
 if !layerIDComponentStandalone || parentLayer != "" {
-id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex()
+id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
 }
 if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
 // There's already a layer that should have the right contents, just reuse it.
@@ -767,7 +782,13 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
 return nil, nil
 }
 
 untrustedUncompressedDigest = d
+// While the contents of the digest are untrusted, make sure at least the _format_ is valid,
+// because we are going to write it to durable storage in expectedLayerDiffIDFlag .
+if err := untrustedUncompressedDigest.Validate(); err != nil {
+return nil, err
+}
 }
 
 flags := make(map[string]interface{})
@@ -793,7 +814,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 }
 
 s.lock.Lock()
-al, ok := s.lockProtected.blobAdditionalLayer[layerDigest]
+al, ok := s.lockProtected.indexToAdditionalLayer[index]
 s.lock.Unlock()
 if ok {
 layer, err := al.PutAs(newLayerID, parentLayer, nil)

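One recurring pattern in the PutBlobPartial hunks above: a succeeded flag plus a deferred cleanup, so the staged layer is released on every early-return error path and kept only when the function reaches its end. A generic, self-contained sketch of the same pattern (the names and the temp-file resource are illustrative, not taken from the vendored code):

package main

import (
	"errors"
	"fmt"
	"os"
)

// buildArtifact creates a temp file and removes it unless every later step succeeds.
func buildArtifact(failLater bool) (string, error) {
	f, err := os.CreateTemp("", "artifact-*")
	if err != nil {
		return "", err
	}
	name := f.Name()
	f.Close()

	succeeded := false
	defer func() {
		if !succeeded {
			os.Remove(name) // cleanup runs only on the failure paths
		}
	}()

	if failLater {
		return "", errors.New("a later step failed") // deferred cleanup fires here
	}

	succeeded = true // from here on the deferred cleanup is a no-op
	return name, nil
}

func main() {
	name, err := buildArtifact(false)
	fmt.Println(name, err)
}
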
@@ -107,12 +107,11 @@ func (s *storageImageSource) Close() error {
 // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
 // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
 // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
 // We need a valid digest value.
 digest := info.Digest
 
-err = digest.Validate()
-if err != nil {
+if err := digest.Validate(); err != nil {
 return nil, 0, err
 }
 
@@ -154,7 +153,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
 // NOTE: the blob is first written to a temporary file and subsequently
 // closed. The intention is to keep the time we own the storage lock
 // as short as possible to allow other processes to access the storage.
-rc, n, _, err = s.getBlobAndLayerID(digest, layers)
+rc, n, _, err := s.getBlobAndLayerID(digest, layers)
 if err != nil {
 return nil, 0, err
 }
@@ -177,7 +176,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
 // On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically
 // cleaned up on process termination (or if the caller forgets to invoke Close())
 // On older versions of Windows we will have to fallback to relying on the caller to invoke Close()
-if err := os.Remove(tmpFile.Name()); err != nil {
+if err := os.Remove(tmpFile.Name()); err == nil {
 tmpFileRemovePending = false
 }
 
@@ -308,9 +307,6 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
 if err != nil {
 return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
 }
-if layer.UncompressedSize < 0 {
-return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
-}
 
 blobDigest := layer.UncompressedDigest
 if blobDigest == "" {
@@ -332,12 +328,16 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
 return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
 }
 }
+size := layer.UncompressedSize
+if size < 0 {
+size = -1
+}
 s.getBlobMutex.Lock()
 s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
 s.getBlobMutex.Unlock()
 blobInfo := types.BlobInfo{
 Digest: blobDigest,
-Size: layer.UncompressedSize,
+Size: size,
 MediaType: uncompressedLayerType,
 }
 physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
@@ -453,10 +453,16 @@ func (s *storageImageSource) getSize() (int64, error) {
 if err != nil {
 return -1, err
 }
-if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 {
+if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || (layer.TOCDigest == "" && layer.UncompressedSize < 0) {
 return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
 }
-sum += layer.UncompressedSize
+// FIXME: We allow layer.UncompressedSize < 0 above, because currently images in an Additional Layer Store don’t provide that value.
+// Right now, various callers in Podman (and, also, newImage in this package) don’t expect the size computation to fail.
+// Should we update the callers, or do we need to continue returning inaccurate information here? Or should we pay the cost
+// to compute the size from the diff?
+if layer.UncompressedSize >= 0 {
+sum += layer.UncompressedSize
+}
 if layer.Parent == "" {
 break
 }

@@ -117,7 +117,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
 
 history = append(history, imgspecv1.History{
 Created: &blobTime,
-CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator),
+CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Encoded(), os.PathSeparator),
 Comment: comment,
 })
 // Use the mtime of the most recently modified file as the image's creation time.

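Several hunks above (computeID, singleLayerIDComponent, commitLayer, and this tarball history entry) switch from Digest.Hex() to Digest.Encoded(). Both return the part after the "algorithm:" prefix; Encoded is the current go-digest name and Hex is the older, deprecated alias, so this is a rename with no behavioral change. A small sketch:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	d := digest.FromString("example payload")
	fmt.Println(d.Algorithm())          // sha256
	fmt.Println(d.Encoded())            // hex payload without the "sha256:" prefix
	fmt.Println(d.Encoded() == d.Hex()) // true: Hex is the deprecated spelling
}
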
4 changes: vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go (generated, vendored)
@@ -28,11 +28,11 @@ func ParseImageName(imgName string) (types.ImageReference, error) {
 // Keep this in sync with TransportFromImageName!
 transportName, withinTransport, valid := strings.Cut(imgName, ":")
 if !valid {
-return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
+return nil, fmt.Errorf(`Invalid image name %q, expected colon-separated transport:reference`, imgName)
 }
 transport := transports.Get(transportName)
 if transport == nil {
-return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName)
+return nil, fmt.Errorf(`Invalid image name %q, unknown transport %q`, imgName, transportName)
 }
 return transport.ParseReference(withinTransport)
 }

@@ -6,12 +6,12 @@ const (
 // VersionMajor is for an API incompatible changes
 VersionMajor = 5
 // VersionMinor is for functionality in a backwards-compatible manner
-VersionMinor = 30
+VersionMinor = 31
 // VersionPatch is for backwards-compatible bug fixes
-VersionPatch = 2
+VersionPatch = 0
 
 // VersionDev indicates development branch. Releases will be empty string.
-VersionDev = "-dev"
+VersionDev = ""
 )
 
 // Version is the specification version that the package types support.

@@ -23,7 +23,7 @@ env:
 # GCE project where images live
 IMAGE_PROJECT: "libpod-218412"
 # VM Image built in containers/automation_images
-IMAGE_SUFFIX: "c20240320t153921z-f39f38d13"
+IMAGE_SUFFIX: "c20240513t140131z-f40f39d13"
 FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
 DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
 
@@ -126,7 +126,7 @@ lint_task:
 folder: $GOPATH/pkg/mod
 build_script: |
 apt-get update
-apt-get install -y libbtrfs-dev libdevmapper-dev
+apt-get install -y libbtrfs-dev
 test_script: |
 make TAGS=regex_precompile local-validate
 make lint
@@ -171,7 +171,7 @@ vendor_task:
 cross_task:
 alias: cross
 container:
-image: golang:1.20
+image: golang:1.21
 build_script: make cross
 
 
@@ -191,6 +191,6 @@ success_task:
 - vendor
 - cross
 container:
-image: golang:1.20
+image: golang:1.21
 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
 script: /bin/true

|
||||||
vendor-in-container
|
vendor-in-container
|
||||||
|
|
||||||
NATIVETAGS :=
|
NATIVETAGS :=
|
||||||
AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh)
|
AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libsubid_tag.sh)
|
||||||
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
|
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
|
||||||
GO ?= go
|
GO ?= go
|
||||||
TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
|
TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
|
||||||
|
|
|
@@ -1 +1 @@
-1.53.1-dev
+1.54.0

@@ -208,8 +208,6 @@ type LayerStore interface {
 ParentOwners(id string) (uids, gids []int, err error)
 ApplyDiff(to string, diff io.Reader) (int64, error)
 ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
-CleanupStagingDirectory(stagingDirectory string) error
-ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
 DifferTarget(id string) (string, error)
 LoadLocked() error
 PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)

@ -1,254 +0,0 @@
|
||||||
//go:build linux && cgo
|
|
||||||
// +build linux,cgo
|
|
||||||
|
|
||||||
package devmapper
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
type directLVMConfig struct {
|
|
||||||
Device string
|
|
||||||
ThinpPercent uint64
|
|
||||||
ThinpMetaPercent uint64
|
|
||||||
AutoExtendPercent uint64
|
|
||||||
AutoExtendThreshold uint64
|
|
||||||
MetaDataSize string
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
|
|
||||||
errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
|
|
||||||
errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm")
|
|
||||||
)
|
|
||||||
|
|
||||||
func validateLVMConfig(cfg directLVMConfig) error {
|
|
||||||
if cfg.Device == "" {
|
|
||||||
return errMissingSetupDevice
|
|
||||||
}
|
|
||||||
if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 {
|
|
||||||
return errThinpPercentMissing
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 {
|
|
||||||
return errThinpPercentTooBig
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkDevAvailable(dev string) error {
|
|
||||||
lvmScan, err := exec.LookPath("lvmdiskscan")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("could not find lvmdiskscan: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
out, err := exec.Command(lvmScan).CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
logrus.WithError(err).Error(string(out))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Contains(out, []byte(dev)) {
|
|
||||||
return fmt.Errorf("%s is not available for use with devicemapper", dev)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkDevInVG(dev string) error {
|
|
||||||
pvDisplay, err := exec.LookPath("pvdisplay")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("could not find pvdisplay: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
out, err := exec.Command(pvDisplay, dev).CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
logrus.WithError(err).Error(string(out))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out)))
|
|
||||||
for scanner.Scan() {
|
|
||||||
fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name")
|
|
||||||
if len(fields) > 1 {
|
|
||||||
// got "VG Name" line"
|
|
||||||
vg := strings.TrimSpace(fields[1])
|
|
||||||
if len(vg) > 0 {
|
|
||||||
return fmt.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
|
|
||||||
}
|
|
||||||
logrus.Error(fields)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkDevHasFS(dev string) error {
|
|
||||||
blkid, err := exec.LookPath("blkid")
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("could not find blkid %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
out, err := exec.Command(blkid, dev).CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
logrus.WithError(err).Error(string(out))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := bytes.Fields(out)
|
|
||||||
for _, f := range fields {
|
|
||||||
kv := bytes.Split(f, []byte{'='})
|
|
||||||
if bytes.Equal(kv[0], []byte("TYPE")) {
|
|
||||||
v := bytes.Trim(kv[1], "\"")
|
|
||||||
if len(v) > 0 {
|
|
||||||
return fmt.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func verifyBlockDevice(dev string, force bool) error {
|
|
||||||
absPath, err := filepath.Abs(dev)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to get absolute path for %s: %s", dev, err)
|
|
||||||
}
|
|
||||||
realPath, err := filepath.EvalSymlinks(absPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to canonicalise path for %s: %s", dev, err)
|
|
||||||
}
|
|
||||||
if err := checkDevAvailable(absPath); err != nil {
|
|
||||||
logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath)
|
|
||||||
if err := checkDevAvailable(realPath); err != nil {
|
|
||||||
return fmt.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device", absPath, realPath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := checkDevInVG(realPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if force {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := checkDevHasFS(realPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readLVMConfig(root string) (directLVMConfig, error) {
|
|
||||||
var cfg directLVMConfig
|
|
||||||
|
|
||||||
p := filepath.Join(root, "setup-config.json")
|
|
||||||
b, err := os.ReadFile(p)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
return cfg, fmt.Errorf("reading existing setup config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if this is just an empty file, no need to produce a json error later if so
|
|
||||||
if len(b) == 0 {
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(b, &cfg); err != nil {
|
|
||||||
return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err)
|
|
||||||
}
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeLVMConfig(root string, cfg directLVMConfig) error {
|
|
||||||
p := filepath.Join(root, "setup-config.json")
|
|
||||||
b, err := json.Marshal(cfg)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("marshalling direct lvm config: %w", err)
|
|
||||||
}
|
|
||||||
if err := os.WriteFile(p, b, 0o600); err != nil {
|
|
||||||
return fmt.Errorf("writing direct lvm config to file: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func setupDirectLVM(cfg directLVMConfig) error {
|
|
||||||
lvmProfileDir := "/etc/lvm/profile"
|
|
||||||
binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"}
|
|
||||||
|
|
||||||
for _, bin := range binaries {
|
|
||||||
if _, err := exec.LookPath(bin); err != nil {
|
|
||||||
return fmt.Errorf("looking up command `"+bin+"` while setting up direct lvm: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err := os.MkdirAll(lvmProfileDir, 0o755)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("creating lvm profile directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.AutoExtendPercent == 0 {
|
|
||||||
cfg.AutoExtendPercent = 20
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.AutoExtendThreshold == 0 {
|
|
||||||
cfg.AutoExtendThreshold = 80
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.ThinpPercent == 0 {
|
|
||||||
cfg.ThinpPercent = 95
|
|
||||||
}
|
|
||||||
if cfg.ThinpMetaPercent == 0 {
|
|
||||||
cfg.ThinpMetaPercent = 1
|
|
||||||
}
|
|
||||||
if cfg.MetaDataSize == "" {
|
|
||||||
cfg.MetaDataSize = "128k"
|
|
||||||
}
|
|
||||||
|
|
||||||
out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%v: %w", string(out), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%v: %w", string(out), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%v: %w", string(out), err)
|
|
||||||
}
|
|
||||||
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%v: %w", string(out), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%v: %w", string(out), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
|
|
||||||
err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0o600)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("writing storage thinp autoextend profile: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%s: %w", string(out), err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
File diff suppressed because it is too large
|
@ -1,109 +0,0 @@
|
||||||
//go:build linux && cgo
|
|
||||||
// +build linux,cgo
|
|
||||||
|
|
||||||
package devmapper
|
|
||||||
|
|
||||||
// Definition of struct dm_task and sub structures (from lvm2)
|
|
||||||
//
|
|
||||||
// struct dm_ioctl {
|
|
||||||
// /*
|
|
||||||
// * The version number is made up of three parts:
|
|
||||||
// * major - no backward or forward compatibility,
|
|
||||||
// * minor - only backwards compatible,
|
|
||||||
// * patch - both backwards and forwards compatible.
|
|
||||||
// *
|
|
||||||
// * All clients of the ioctl interface should fill in the
|
|
||||||
// * version number of the interface that they were
|
|
||||||
// * compiled with.
|
|
||||||
// *
|
|
||||||
// * All recognized ioctl commands (ie. those that don't
|
|
||||||
// * return -ENOTTY) fill out this field, even if the
|
|
||||||
// * command failed.
|
|
||||||
// */
|
|
||||||
// uint32_t version[3]; /* in/out */
|
|
||||||
// uint32_t data_size; /* total size of data passed in
|
|
||||||
// * including this struct */
|
|
||||||
|
|
||||||
// uint32_t data_start; /* offset to start of data
|
|
||||||
// * relative to start of this struct */
|
|
||||||
|
|
||||||
// uint32_t target_count; /* in/out */
|
|
||||||
// int32_t open_count; /* out */
|
|
||||||
// uint32_t flags; /* in/out */
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * event_nr holds either the event number (input and output) or the
|
|
||||||
// * udev cookie value (input only).
|
|
||||||
// * The DM_DEV_WAIT ioctl takes an event number as input.
|
|
||||||
// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
|
|
||||||
// * use the field as a cookie to return in the DM_COOKIE
|
|
||||||
// * variable with the uevents they issue.
|
|
||||||
// * For output, the ioctls return the event number, not the cookie.
|
|
||||||
// */
|
|
||||||
// uint32_t event_nr; /* in/out */
|
|
||||||
// uint32_t padding;
|
|
||||||
|
|
||||||
// uint64_t dev; /* in/out */
|
|
||||||
|
|
||||||
// char name[DM_NAME_LEN]; /* device name */
|
|
||||||
// char uuid[DM_UUID_LEN]; /* unique identifier for
|
|
||||||
// * the block device */
|
|
||||||
// char data[7]; /* padding or data */
|
|
||||||
// };
|
|
||||||
|
|
||||||
// struct target {
|
|
||||||
// uint64_t start;
|
|
||||||
// uint64_t length;
|
|
||||||
// char *type;
|
|
||||||
// char *params;
|
|
||||||
|
|
||||||
// struct target *next;
|
|
||||||
// };
|
|
||||||
|
|
||||||
// typedef enum {
|
|
||||||
// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
|
|
||||||
// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */
|
|
||||||
// } dm_add_node_t;
|
|
||||||
|
|
||||||
// struct dm_task {
|
|
||||||
// int type;
|
|
||||||
// char *dev_name;
|
|
||||||
// char *mangled_dev_name;
|
|
||||||
|
|
||||||
// struct target *head, *tail;
|
|
||||||
|
|
||||||
// int read_only;
|
|
||||||
// uint32_t event_nr;
|
|
||||||
// int major;
|
|
||||||
// int minor;
|
|
||||||
// int allow_default_major_fallback;
|
|
||||||
// uid_t uid;
|
|
||||||
// gid_t gid;
|
|
||||||
// mode_t mode;
|
|
||||||
// uint32_t read_ahead;
|
|
||||||
// uint32_t read_ahead_flags;
|
|
||||||
// union {
|
|
||||||
// struct dm_ioctl *v4;
|
|
||||||
// } dmi;
|
|
||||||
// char *newname;
|
|
||||||
// char *message;
|
|
||||||
// char *geometry;
|
|
||||||
// uint64_t sector;
|
|
||||||
// int no_flush;
|
|
||||||
// int no_open_count;
|
|
||||||
// int skip_lockfs;
|
|
||||||
// int query_inactive_table;
|
|
||||||
// int suppress_identical_reload;
|
|
||||||
// dm_add_node_t add_node;
|
|
||||||
// uint64_t existing_table_size;
|
|
||||||
// int cookie_set;
|
|
||||||
// int new_uuid;
|
|
||||||
// int secure_data;
|
|
||||||
// int retry_remove;
|
|
||||||
// int enable_checks;
|
|
||||||
// int expected_errno;
|
|
||||||
|
|
||||||
// char *uuid;
|
|
||||||
// char *mangled_uuid;
|
|
||||||
// };
|
|
||||||
//
|
|
|
@ -1,272 +0,0 @@
|
||||||
//go:build linux && cgo
|
|
||||||
// +build linux,cgo
|
|
||||||
|
|
||||||
package devmapper
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
graphdriver "github.com/containers/storage/drivers"
|
|
||||||
"github.com/containers/storage/pkg/devicemapper"
|
|
||||||
"github.com/containers/storage/pkg/directory"
|
|
||||||
"github.com/containers/storage/pkg/fileutils"
|
|
||||||
"github.com/containers/storage/pkg/idtools"
|
|
||||||
"github.com/containers/storage/pkg/locker"
|
|
||||||
"github.com/containers/storage/pkg/mount"
|
|
||||||
units "github.com/docker/go-units"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
const defaultPerms = os.FileMode(0o555)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
graphdriver.MustRegister("devicemapper", Init)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Driver contains the device set mounted and the home directory
|
|
||||||
type Driver struct {
|
|
||||||
*DeviceSet
|
|
||||||
home string
|
|
||||||
uidMaps []idtools.IDMap
|
|
||||||
gidMaps []idtools.IDMap
|
|
||||||
ctr *graphdriver.RefCounter
|
|
||||||
locker *locker.Locker
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init creates a driver with the given home and the set of options.
|
|
||||||
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
|
|
||||||
deviceSet, err := NewDeviceSet(home, true, options.DriverOptions, options.UIDMaps, options.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := mount.MakePrivate(home); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
d := &Driver{
|
|
||||||
DeviceSet: deviceSet,
|
|
||||||
home: home,
|
|
||||||
uidMaps: options.UIDMaps,
|
|
||||||
gidMaps: options.GIDMaps,
|
|
||||||
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
|
|
||||||
locker: locker.New(),
|
|
||||||
}
|
|
||||||
return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) String() string {
|
|
||||||
return "devicemapper"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Status returns the status about the driver in a printable format.
|
|
||||||
// Information returned contains Pool Name, Data File, Metadata file, disk usage by
|
|
||||||
// the data and metadata, etc.
|
|
||||||
func (d *Driver) Status() [][2]string {
|
|
||||||
s := d.DeviceSet.Status()
|
|
||||||
|
|
||||||
status := [][2]string{
|
|
||||||
{"Pool Name", s.PoolName},
|
|
||||||
{"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
|
|
||||||
{"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
|
|
||||||
{"Backing Filesystem", s.BaseDeviceFS},
|
|
||||||
{"Data file", s.DataFile},
|
|
||||||
{"Metadata file", s.MetadataFile},
|
|
||||||
{"Data Space Used", units.HumanSize(float64(s.Data.Used))},
|
|
||||||
{"Data Space Total", units.HumanSize(float64(s.Data.Total))},
|
|
||||||
{"Data Space Available", units.HumanSize(float64(s.Data.Available))},
|
|
||||||
{"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
|
|
||||||
{"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
|
|
||||||
{"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
|
|
||||||
{"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
|
|
||||||
{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
|
|
||||||
{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
|
|
||||||
{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
|
|
||||||
{"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
|
|
||||||
}
|
|
||||||
if len(s.DataLoopback) > 0 {
|
|
||||||
status = append(status, [2]string{"Data loop file", s.DataLoopback})
|
|
||||||
}
|
|
||||||
if len(s.MetadataLoopback) > 0 {
|
|
||||||
status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
|
|
||||||
}
|
|
||||||
if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
|
|
||||||
status = append(status, [2]string{"Library Version", vStr})
|
|
||||||
}
|
|
||||||
return status
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metadata returns a map of information about the device.
|
|
||||||
func (d *Driver) Metadata(id string) (map[string]string, error) {
|
|
||||||
m, err := d.DeviceSet.exportDeviceMetadata(id)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
metadata := make(map[string]string)
|
|
||||||
metadata["DeviceId"] = strconv.Itoa(m.deviceID)
|
|
||||||
metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10)
|
|
||||||
metadata["DeviceName"] = m.deviceName
|
|
||||||
return metadata, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cleanup unmounts a device.
|
|
||||||
func (d *Driver) Cleanup() error {
|
|
||||||
err := d.DeviceSet.Shutdown(d.home)
|
|
||||||
|
|
||||||
umountErr := mount.Unmount(d.home)
|
|
||||||
// in case we have two errors, prefer the one from Shutdown()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return umountErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
|
|
||||||
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
|
|
||||||
return d.Create(id, template, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateReadWrite creates a layer that is writable for use as a container
|
|
||||||
// file system.
|
|
||||||
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
|
|
||||||
return d.Create(id, parent, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create adds a device with a given id and the parent.
|
|
||||||
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
|
|
||||||
var storageOpt map[string]string
|
|
||||||
if opts != nil {
|
|
||||||
storageOpt = opts.StorageOpt
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point.
|
|
||||||
func (d *Driver) Remove(id string) error {
|
|
||||||
d.locker.Lock(id)
|
|
||||||
defer d.locker.Unlock(id)
|
|
||||||
if !d.DeviceSet.HasDevice(id) {
|
|
||||||
// Consider removing a non-existing device a no-op
|
|
||||||
// This is useful to be able to progress on container removal
|
|
||||||
// if the underlying device has gone away due to earlier errors
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// This assumes the device has been properly Get/Put:ed and thus is unmounted
|
|
||||||
if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
|
|
||||||
return fmt.Errorf("failed to remove device %s: %v", id, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Most probably the mount point is already removed on Put()
|
|
||||||
// (see DeviceSet.UnmountDevice()), but just in case it was not
|
|
||||||
// let's try to remove it here as well, ignoring errors as
|
|
||||||
// an older kernel can return EBUSY if e.g. the mount was leaked
|
|
||||||
// to other mount namespaces. A failure to remove the container's
|
|
||||||
// mount point is not important and should not be treated
|
|
||||||
// as a failure to remove the container.
|
|
||||||
mp := path.Join(d.home, "mnt", id)
|
|
||||||
err := unix.Rmdir(mp)
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get mounts a device with given id into the root filesystem
|
|
||||||
func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
|
|
||||||
d.locker.Lock(id)
|
|
||||||
defer d.locker.Unlock(id)
|
|
||||||
mp := path.Join(d.home, "mnt", id)
|
|
||||||
rootFs := path.Join(mp, "rootfs")
|
|
||||||
if count := d.ctr.Increment(mp); count > 1 {
|
|
||||||
return rootFs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
|
|
||||||
if err != nil {
|
|
||||||
d.ctr.Decrement(mp)
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the target directories if they don't exist
|
|
||||||
if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0o755, uid, gid); err != nil {
|
|
||||||
d.ctr.Decrement(mp)
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if err := idtools.MkdirAs(mp, 0o755, uid, gid); err != nil && !os.IsExist(err) {
|
|
||||||
d.ctr.Decrement(mp)
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mount the device
|
|
||||||
if err := d.DeviceSet.MountDevice(id, mp, options); err != nil {
|
|
||||||
d.ctr.Decrement(mp)
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := idtools.MkdirAllAs(rootFs, defaultPerms, uid, gid); err != nil {
|
|
||||||
d.ctr.Decrement(mp)
|
|
||||||
d.DeviceSet.UnmountDevice(id, mp)
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
idFile := path.Join(mp, "id")
|
|
||||||
if err := fileutils.Exists(idFile); err != nil && os.IsNotExist(err) {
|
|
||||||
// Create an "id" file with the container/image id in it to help reconstruct this in case
|
|
||||||
// of later problems
|
|
||||||
if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil {
|
|
||||||
d.ctr.Decrement(mp)
|
|
||||||
d.DeviceSet.UnmountDevice(id, mp)
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return rootFs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put unmounts a device and removes it.
|
|
||||||
func (d *Driver) Put(id string) error {
|
|
||||||
d.locker.Lock(id)
|
|
||||||
defer d.locker.Unlock(id)
|
|
||||||
mp := path.Join(d.home, "mnt", id)
|
|
||||||
if count := d.ctr.Decrement(mp); count > 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err := d.DeviceSet.UnmountDevice(id, mp)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
|
|
||||||
// For devmapper, it queries the mnt path for this ID.
|
|
||||||
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
|
|
||||||
d.locker.Lock(id)
|
|
||||||
defer d.locker.Unlock(id)
|
|
||||||
return directory.Usage(path.Join(d.home, "mnt", id))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exists checks to see if the device exists.
|
|
||||||
func (d *Driver) Exists(id string) bool {
|
|
||||||
return d.DeviceSet.HasDevice(id)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AdditionalImageStores returns additional image stores supported by the driver
|
|
||||||
func (d *Driver) AdditionalImageStores() []string {
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@@ -1,8 +0,0 @@
//go:build linux && cgo
// +build linux,cgo

package devmapper

import jsoniter "github.com/json-iterator/go"

var json = jsoniter.ConfigCompatibleWithStandardLibrary
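The deleted jsoniter.go above only aliased json-iterator as a drop-in replacement for the standard encoding/json package inside the devmapper driver, and it goes away with the rest of that driver in this update. For reference, a minimal standalone sketch of the same drop-in pattern; the payload struct is invented for the example:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// json behaves like the standard library's encoding/json package, so existing
// json.Marshal / json.Unmarshal call sites keep working unchanged.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

type payload struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

func main() {
	buf, err := json.Marshal(payload{Name: "layer", Size: 42})
	if err != nil {
		panic(err)
	}
	var back payload
	if err := json.Unmarshal(buf, &back); err != nil {
		panic(err)
	}
	fmt.Println(string(buf), back.Name, back.Size)
}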
@ -1,89 +0,0 @@
|
||||||
//go:build linux && cgo
|
|
||||||
// +build linux,cgo
|
|
||||||
|
|
||||||
package devmapper
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FIXME: this is copy-pasted from the aufs driver.
|
|
||||||
// It should be moved into the core.
|
|
||||||
|
|
||||||
// Mounted returns true if a mount point exists.
|
|
||||||
func Mounted(mountpoint string) (bool, error) {
|
|
||||||
var mntpointSt unix.Stat_t
|
|
||||||
if err := unix.Stat(mountpoint, &mntpointSt); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
var parentSt unix.Stat_t
|
|
||||||
if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
return mntpointSt.Dev != parentSt.Dev, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type probeData struct {
|
|
||||||
fsName string
|
|
||||||
magic string
|
|
||||||
offset uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProbeFsType returns the filesystem name for the given device id.
|
|
||||||
func ProbeFsType(device string) (string, error) {
|
|
||||||
probes := []probeData{
|
|
||||||
{"btrfs", "_BHRfS_M", 0x10040},
|
|
||||||
{"ext4", "\123\357", 0x438},
|
|
||||||
{"xfs", "XFSB", 0},
|
|
||||||
}
|
|
||||||
|
|
||||||
maxLen := uint64(0)
|
|
||||||
for _, p := range probes {
|
|
||||||
l := p.offset + uint64(len(p.magic))
|
|
||||||
if l > maxLen {
|
|
||||||
maxLen = l
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
file, err := os.Open(device)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
buffer := make([]byte, maxLen)
|
|
||||||
l, err := file.Read(buffer)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if uint64(l) != maxLen {
|
|
||||||
return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, p := range probes {
|
|
||||||
if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
|
|
||||||
return p.fsName, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device)
|
|
||||||
}
|
|
||||||
|
|
||||||
func joinMountOptions(a, b string) string {
|
|
||||||
if a == "" {
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
if b == "" {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return a + "," + b
|
|
||||||
}
|
|
|
@@ -298,8 +298,8 @@ type AdditionalLayerStoreDriver interface {
 	Driver
 
 	// LookupAdditionalLayer looks up additional layer store by the specified
-	// digest and ref and returns an object representing that layer.
-	LookupAdditionalLayer(d digest.Digest, ref string) (AdditionalLayer, error)
+	// TOC digest and ref and returns an object representing that layer.
+	LookupAdditionalLayer(tocDigest digest.Digest, ref string) (AdditionalLayer, error)
 
 	// LookupAdditionalLayer looks up additional layer store by the specified
 	// ID and returns an object representing that layer.
@@ -94,8 +94,6 @@ var (
 	// Slice of drivers that should be used in an order
 	Priority = []string{
 		"overlay",
-		// We don't support devicemapper without configuration
-		// "devicemapper",
 		"aufs",
 		"btrfs",
 		"zfs",
@@ -4,12 +4,14 @@
 package overlay
 
 import (
+	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
 	"os"
 	"os/exec"
 	"path/filepath"
+	"strings"
 	"sync"
 
 	"github.com/containers/storage/pkg/chunked/dump"
@@ -70,12 +72,18 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
 		// a scope to close outFd before setting fsverity on the read-only fd.
 		defer outFd.Close()
 
+		errBuf := &bytes.Buffer{}
 		cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3")
 		cmd.ExtraFiles = []*os.File{outFd}
-		cmd.Stderr = os.Stderr
+		cmd.Stderr = errBuf
 		cmd.Stdin = dumpReader
 		if err := cmd.Run(); err != nil {
-			return fmt.Errorf("failed to convert json to erofs: %w", err)
+			rErr := fmt.Errorf("failed to convert json to erofs: %w", err)
+			exitErr := &exec.ExitError{}
+			if errors.As(err, &exitErr) {
+				return fmt.Errorf("%w: %s", rErr, strings.TrimSpace(errBuf.String()))
+			}
+			return rErr
 		}
 		return nil
 	}()
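The composefs hunk above stops inheriting os.Stderr for the erofs conversion helper and instead buffers the child's stderr, so a non-zero exit folds the tool's own diagnostics into the returned error. A self-contained sketch of the same capture pattern outside this codebase; the command name and error wording are illustrative:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"os/exec"
	"strings"
)

// runCapturingStderr runs cmdName with args and, on failure, wraps the
// process's stderr output into the returned error.
func runCapturingStderr(cmdName string, args ...string) error {
	errBuf := &bytes.Buffer{}
	cmd := exec.Command(cmdName, args...)
	cmd.Stderr = errBuf // collect diagnostics instead of inheriting os.Stderr

	if err := cmd.Run(); err != nil {
		rErr := fmt.Errorf("%s failed: %w", cmdName, err)
		exitErr := &exec.ExitError{}
		// Only attach stderr when the process actually ran and exited non-zero.
		if errors.As(err, &exitErr) {
			return fmt.Errorf("%w: %s", rErr, strings.TrimSpace(errBuf.String()))
		}
		return rErr
	}
	return nil
}

func main() {
	if err := runCapturingStderr("false"); err != nil {
		fmt.Println(err)
	}
}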
@@ -28,6 +28,7 @@ import (
 	"github.com/containers/storage/pkg/fsutils"
 	"github.com/containers/storage/pkg/idmap"
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/lockfile"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"
 	"github.com/containers/storage/pkg/system"
@@ -83,6 +84,8 @@ const (
 	lowerFile = "lower"
 	maxDepth = 500
 
+	stagingLockFile = "staging.lock"
+
 	tocArtifact = "toc"
 	fsVerityDigestsArtifact = "fs-verity-digests"
 
@@ -127,6 +130,8 @@ type Driver struct {
 	usingMetacopy bool
 	usingComposefs bool
 
+	stagingDirsLocks map[string]*lockfile.LockFile
+
 	supportsIDMappedMounts *bool
 }
 
@@ -460,6 +465,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		supportsVolatile: supportsVolatile,
 		usingComposefs: opts.useComposefs,
 		options: *opts,
+		stagingDirsLocks: make(map[string]*lockfile.LockFile),
 	}
 
 	d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d))
@@ -876,20 +882,54 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
 	return metadata, nil
 }
 
-// Cleanup any state created by overlay which should be cleaned when daemon
-// is being shutdown. For now, we just have to unmount the bind mounted
-// we had created.
+// Cleanup any state created by overlay which should be cleaned when
+// the storage is being shutdown. The only state created by the driver
+// is the bind mount on the home directory.
 func (d *Driver) Cleanup() error {
-	_ = os.RemoveAll(filepath.Join(d.home, stagingDir))
+	anyPresent := d.pruneStagingDirectories()
+	if anyPresent {
+		return nil
+	}
 	return mount.Unmount(d.home)
 }
 
+// pruneStagingDirectories cleans up any staging directory that was leaked.
+// It returns whether any staging directory is still present.
+func (d *Driver) pruneStagingDirectories() bool {
+	for _, lock := range d.stagingDirsLocks {
+		lock.Unlock()
+	}
+	d.stagingDirsLocks = make(map[string]*lockfile.LockFile)
+
+	anyPresent := false
+
+	homeStagingDir := filepath.Join(d.home, stagingDir)
+	dirs, err := os.ReadDir(homeStagingDir)
+	if err == nil {
+		for _, dir := range dirs {
+			stagingDirToRemove := filepath.Join(homeStagingDir, dir.Name())
+			lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile))
+			if err != nil {
+				anyPresent = true
+				continue
+			}
+			if err := lock.TryLock(); err != nil {
+				anyPresent = true
+				continue
+			}
+			_ = os.RemoveAll(stagingDirToRemove)
+			lock.Unlock()
+		}
+	}
+	return anyPresent
+}
+
 // LookupAdditionalLayer looks up additional layer store by the specified
-// digest and ref and returns an object representing that layer.
+// TOC digest and ref and returns an object representing that layer.
 // This API is experimental and can be changed without bumping the major version number.
 // TODO: to remove the comment once it's no longer experimental.
-func (d *Driver) LookupAdditionalLayer(dgst digest.Digest, ref string) (graphdriver.AdditionalLayer, error) {
-	l, err := d.getAdditionalLayerPath(dgst, ref)
+func (d *Driver) LookupAdditionalLayer(tocDigest digest.Digest, ref string) (graphdriver.AdditionalLayer, error) {
+	l, err := d.getAdditionalLayerPath(tocDigest, ref)
 	if err != nil {
 		return nil, err
 	}
@@ -2029,7 +2069,14 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
 
 // CleanupStagingDirectory cleanups the staging directory.
 func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
-	return os.RemoveAll(stagingDirectory)
+	parentStagingDir := filepath.Dir(stagingDirectory)
+
+	if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
+		delete(d.stagingDirsLocks, parentStagingDir)
+		lock.Unlock()
+	}
+
+	return os.RemoveAll(parentStagingDir)
 }
 
 func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
@@ -2050,8 +2097,8 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
 	return supportsDataOnly, err
 }
 
-// ApplyDiff applies the changes in the new layer using the specified function
-func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
+// ApplyDiffWithDiffer applies the changes in the new layer using the specified function
+func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
 	var idMappings *idtools.IDMappings
 	if options != nil {
 		idMappings = options.Mappings
@@ -2068,7 +2115,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
 		if err != nil && !os.IsExist(err) {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
-		applyDir, err = os.MkdirTemp(stagingDir, "")
+		layerDir, err := os.MkdirTemp(stagingDir, "")
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
@@ -2076,9 +2123,23 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
 		if d.options.forceMask != nil {
 			perms = *d.options.forceMask
 		}
-		if err := os.Chmod(applyDir, perms); err != nil {
+		applyDir = filepath.Join(layerDir, "dir")
+		if err := os.Mkdir(applyDir, perms); err != nil {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
+
+		lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
+		if err != nil {
+			return graphdriver.DriverWithDifferOutput{}, err
+		}
+		defer func() {
+			if errRet != nil {
+				delete(d.stagingDirsLocks, layerDir)
+				lock.Unlock()
+			}
+		}()
+		d.stagingDirsLocks[layerDir] = lock
+		lock.Lock()
 	} else {
 		var err error
 		applyDir, err = d.getDiffPath(id)
|
||||||
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
|
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
|
||||||
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
|
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
|
||||||
stagingDirectory := diffOutput.Target
|
stagingDirectory := diffOutput.Target
|
||||||
if filepath.Dir(stagingDirectory) != d.getStagingDir(id) {
|
parentStagingDir := filepath.Dir(stagingDirectory)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
|
||||||
|
delete(d.stagingDirsLocks, parentStagingDir)
|
||||||
|
lock.Unlock()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if filepath.Dir(parentStagingDir) != d.getStagingDir(id) {
|
||||||
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
|
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
|
||||||
}
|
}
|
||||||
|
|
||||||
diffPath, err := d.getDiffPath(id)
|
diffPath, err := d.getDiffPath(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -2405,14 +2476,14 @@ func nameWithSuffix(name string, number int) string {
|
||||||
return fmt.Sprintf("%s%d", name, number)
|
return fmt.Sprintf("%s%d", name, number)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, error) {
|
func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (string, error) {
|
||||||
refElem := base64.StdEncoding.EncodeToString([]byte(ref))
|
refElem := base64.StdEncoding.EncodeToString([]byte(ref))
|
||||||
for _, ls := range d.options.layerStores {
|
for _, ls := range d.options.layerStores {
|
||||||
ref := ""
|
ref := ""
|
||||||
if ls.withReference {
|
if ls.withReference {
|
||||||
ref = refElem
|
ref = refElem
|
||||||
}
|
}
|
||||||
target := path.Join(ls.path, ref, dgst.String())
|
target := path.Join(ls.path, ref, tocDigest.String())
|
||||||
// Check if all necessary files exist
|
// Check if all necessary files exist
|
||||||
for _, p := range []string{
|
for _, p := range []string{
|
||||||
filepath.Join(target, "diff"),
|
filepath.Join(target, "diff"),
|
||||||
|
@ -2427,7 +2498,7 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string,
|
||||||
return target, nil
|
return target, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return "", fmt.Errorf("additional layer (%q, %q) not found: %w", dgst, ref, graphdriver.ErrLayerUnknown)
|
return "", fmt.Errorf("additional layer (%q, %q) not found: %w", tocDigest, ref, graphdriver.ErrLayerUnknown)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) releaseAdditionalLayerByID(id string) {
|
func (d *Driver) releaseAdditionalLayerByID(id string) {
|
||||||
|
|
|
@@ -1,9 +0,0 @@
//go:build !exclude_graphdriver_devicemapper && linux && cgo
// +build !exclude_graphdriver_devicemapper,linux,cgo

package register

import (
	// register the devmapper graphdriver
	_ "github.com/containers/storage/drivers/devmapper"
)
@@ -111,7 +111,7 @@ func (s *idSet) findAvailable(n int) (*idSet, error) {
 	iterator, cancel := s.iterator()
 	defer cancel()
 	for i := iterator(); n > 0 && i != nil; i = iterator() {
-		i.end = minInt(i.end, i.start+n)
+		i.end = min(i.end, i.start+n)
 		intervals = append(intervals, *i)
 		n -= i.length()
 	}
@@ -129,7 +129,7 @@ func (s *idSet) zip(container *idSet) []idtools.IDMap {
 	defer containerCancel()
 	var out []idtools.IDMap
 	for h, c := hostIterator(), containerIterator(); h != nil && c != nil; {
-		if n := minInt(h.length(), c.length()); n > 0 {
+		if n := min(h.length(), c.length()); n > 0 {
 			out = append(out, idtools.IDMap{
 				ContainerID: c.start,
 				HostID: h.start,
@@ -159,12 +159,12 @@ type interval struct {
 }
 
 func (i interval) length() int {
-	return maxInt(0, i.end-i.start)
+	return max(0, i.end-i.start)
 }
 
 func (i interval) Intersect(other intervalset.Interval) intervalset.Interval {
 	j := other.(interval)
-	return interval{start: maxInt(i.start, j.start), end: minInt(i.end, j.end)}
+	return interval{start: max(i.start, j.start), end: min(i.end, j.end)}
 }
 
 func (i interval) Before(other intervalset.Interval) bool {
@@ -183,15 +183,15 @@ func (i interval) Bisect(other intervalset.Interval) (intervalset.Interval, inte
 	}
 	// Subtracting [j.start, j.end) is equivalent to the union of intersecting (-inf, j.start) and
 	// [j.end, +inf).
-	left := interval{start: i.start, end: minInt(i.end, j.start)}
-	right := interval{start: maxInt(i.start, j.end), end: i.end}
+	left := interval{start: i.start, end: min(i.end, j.start)}
+	right := interval{start: max(i.start, j.end), end: i.end}
 	return left, right
 }
 
 func (i interval) Adjoin(other intervalset.Interval) intervalset.Interval {
 	j := other.(interval)
 	if !i.IsZero() && !j.IsZero() && (i.end == j.start || j.end == i.start) {
-		return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)}
+		return interval{start: min(i.start, j.start), end: max(i.end, j.end)}
 	}
 	return interval{}
 }
@@ -204,24 +204,10 @@ func (i interval) Encompass(other intervalset.Interval) intervalset.Interval {
 	case j.IsZero():
 		return i
 	default:
-		return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)}
+		return interval{start: min(i.start, j.start), end: max(i.end, j.end)}
 	}
 }
 
-func minInt(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func maxInt(a, b int) int {
-	if a < b {
-		return b
-	}
-	return a
-}
-
 func hasOverlappingRanges(mappings []idtools.IDMap) error {
 	hostIntervals := intervalset.Empty()
 	containerIntervals := intervalset.Empty()
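The idset hunks above are a straightforward modernization: Go 1.21 added built-in generic min and max functions, so the package-local minInt and maxInt helpers can be dropped without changing behavior. A tiny illustration:

package main

import "fmt"

func main() {
	// Built-in generics introduced in Go 1.21; they work for any ordered type,
	// so the old int-only helpers are no longer needed.
	fmt.Println(min(3, 7), max(3, 7))         // 3 7
	fmt.Println(min(2.5, 1.5), max("a", "b")) // 1.5 b
}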
@@ -0,0 +1,87 @@
package chunked

import (
	"encoding/binary"
	"hash/crc32"
	"io"
)

type bloomFilter struct {
	bitArray []uint64
	k uint32
}

func newBloomFilter(size int, k uint32) *bloomFilter {
	numElements := (size + 63) / 64
	if numElements == 0 {
		numElements = 1
	}
	return &bloomFilter{
		bitArray: make([]uint64, numElements),
		k: k,
	}
}

func newBloomFilterFromArray(bitArray []uint64, k uint32) *bloomFilter {
	return &bloomFilter{
		bitArray: bitArray,
		k: k,
	}
}

func (bf *bloomFilter) hashFn(item []byte, seed uint32) (uint64, uint64) {
	if len(item) == 0 {
		return 0, 1
	}
	mod := uint32(len(bf.bitArray) * 64)
	seedSplit := seed % uint32(len(item))
	hash := (crc32.ChecksumIEEE(item[:seedSplit]) ^ crc32.ChecksumIEEE(item[seedSplit:])) % mod
	return uint64(hash / 64), uint64(1 << (hash % 64))
}

func (bf *bloomFilter) add(item []byte) {
	for i := uint32(0); i < bf.k; i++ {
		index, mask := bf.hashFn(item, i)
		bf.bitArray[index] |= mask
	}
}

func (bf *bloomFilter) maybeContains(item []byte) bool {
	for i := uint32(0); i < bf.k; i++ {
		index, mask := bf.hashFn(item, i)
		if bf.bitArray[index]&mask == 0 {
			return false
		}
	}
	return true
}

func (bf *bloomFilter) writeTo(writer io.Writer) error {
	if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil {
		return err
	}
	if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil {
		return err
	}
	if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil {
		return err
	}
	return nil
}

func readBloomFilter(reader io.Reader) (*bloomFilter, error) {
	var bloomFilterLen uint64
	var k uint32

	if err := binary.Read(reader, binary.LittleEndian, &bloomFilterLen); err != nil {
		return nil, err
	}
	if err := binary.Read(reader, binary.LittleEndian, &k); err != nil {
		return nil, err
	}
	bloomFilterArray := make([]uint64, bloomFilterLen)
	if err := binary.Read(reader, binary.LittleEndian, &bloomFilterArray); err != nil {
		return nil, err
	}
	return newBloomFilterFromArray(bloomFilterArray, k), nil
}
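The new bloom filter above is sized by the cache code that follows, which uses bloomFilterScale = 10 bits per entry and bloomFilterHashes = 3 and quotes a roughly 1.7% false-positive rate. That figure matches the usual approximation p ≈ (1 - e^(-k*n/m))^k; a small sketch that reproduces the number (not part of the vendored code):

package main

import (
	"fmt"
	"math"
)

// falsePositiveRate returns the classic Bloom-filter approximation
// p ≈ (1 - e^(-k*n/m))^k for k hash functions and m/n bits per entry.
func falsePositiveRate(k, bitsPerEntry float64) float64 {
	return math.Pow(1-math.Exp(-k/bitsPerEntry), k)
}

func main() {
	// Parameters used by the chunked cache: 3 hashes, 10 bits per entry.
	fmt.Printf("%.4f\n", falsePositiveRate(3, 10)) // prints 0.0174, i.e. ~1.7%
}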
@@ -3,17 +3,16 @@ package chunked
 import (
 	"bytes"
 	"encoding/binary"
+	"encoding/hex"
 	"errors"
 	"fmt"
 	"io"
 	"os"
 	"runtime"
 	"sort"
-	"strconv"
 	"strings"
 	"sync"
 	"time"
-	"unsafe"
 
 	storage "github.com/containers/storage"
 	graphdriver "github.com/containers/storage/drivers"
@@ -27,16 +26,24 @@ import (
 
 const (
 	cacheKey = "chunked-manifest-cache"
-	cacheVersion = 2
+	cacheVersion = 3
 
 	digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
+	// Using 3 hashes functions and n/m = 10 gives a false positive rate of ~1.7%:
+	// https://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html
+	bloomFilterScale = 10 // how much bigger is the bloom filter than the number of entries
+	bloomFilterHashes = 3 // number of hash functions for the bloom filter
 )
 
 type cacheFile struct {
 	tagLen int
 	digestLen int
-	tags []byte
-	vdata []byte
+	fnamesLen int
+	tags []byte
+	vdata []byte
+	fnames []byte
+	bloomFilter *bloomFilter
 }
 
 type layer struct {
@ -154,6 +161,23 @@ fallback:
|
||||||
return buf, nil, err
|
return buf, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func makeBinaryDigest(stringDigest string) ([]byte, error) {
|
||||||
|
d, err := digest.Parse(stringDigest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
digestBytes, err := hex.DecodeString(d.Encoded())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
algo := []byte(d.Algorithm())
|
||||||
|
buf := make([]byte, 0, len(algo)+1+len(digestBytes))
|
||||||
|
buf = append(buf, algo...)
|
||||||
|
buf = append(buf, ':')
|
||||||
|
buf = append(buf, digestBytes...)
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
|
func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
|
||||||
buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey)
|
buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey)
|
||||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||||
|
@ -175,6 +199,8 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
|
||||||
return c.createLayer(layerID, cacheFile, mmapBuffer)
|
return c.createLayer(layerID, cacheFile, mmapBuffer)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// createCacheFileFromTOC attempts to create a cache file for the specified layer.
|
||||||
|
// If a TOC is not available, the cache won't be created and nil is returned.
|
||||||
func (c *layersCache) createCacheFileFromTOC(layerID string) (*layer, error) {
|
func (c *layersCache) createCacheFileFromTOC(layerID string) (*layer, error) {
|
||||||
clFile, err := c.store.LayerBigData(layerID, chunkedLayerDataKey)
|
clFile, err := c.store.LayerBigData(layerID, chunkedLayerDataKey)
|
||||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||||
|
@ -195,6 +221,10 @@ func (c *layersCache) createCacheFileFromTOC(layerID string) (*layer, error) {
|
||||||
}
|
}
|
||||||
manifestReader, err := c.store.LayerBigData(layerID, bigDataKey)
|
manifestReader, err := c.store.LayerBigData(layerID, bigDataKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// the cache file is not needed since there is no manifest file.
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer manifestReader.Close()
|
defer manifestReader.Close()
|
||||||
|
@ -244,7 +274,7 @@ func (c *layersCache) load() error {
|
||||||
// try to read the existing cache file.
|
// try to read the existing cache file.
|
||||||
l, err := c.loadLayerCache(r.ID)
|
l, err := c.loadLayerCache(r.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Warningf("Error loading cache file for layer %q: %v", r.ID, err)
|
logrus.Infof("Error loading cache file for layer %q: %v", r.ID, err)
|
||||||
}
|
}
|
||||||
if l != nil {
|
if l != nil {
|
||||||
newLayers = append(newLayers, l)
|
newLayers = append(newLayers, l)
|
||||||
|
@ -303,16 +333,46 @@ func calculateHardLinkFingerprint(f *fileMetadata) (string, error) {
|
||||||
return string(digester.Digest()), nil
|
return string(digester.Digest()), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// generateFileLocation generates a file location in the form $OFFSET:$LEN:$PATH
|
// generateFileLocation generates a file location in the form $OFFSET$LEN$PATH_POS
|
||||||
func generateFileLocation(path string, offset, len uint64) []byte {
|
func generateFileLocation(pathPos int, offset, len uint64) []byte {
|
||||||
return []byte(fmt.Sprintf("%d:%d:%s", offset, len, path))
|
var buf []byte
|
||||||
|
|
||||||
|
buf = binary.AppendUvarint(buf, uint64(pathPos))
|
||||||
|
buf = binary.AppendUvarint(buf, offset)
|
||||||
|
buf = binary.AppendUvarint(buf, len)
|
||||||
|
|
||||||
|
return buf
|
||||||
}
|
}
|
||||||
|
|
||||||
// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
|
// parseFileLocation reads what was written by generateFileLocation.
|
||||||
// the [OFFSET; LEN] points to the variable length data where the file locations
|
func parseFileLocation(locationData []byte) (int, uint64, uint64, error) {
|
||||||
|
reader := bytes.NewReader(locationData)
|
||||||
|
|
||||||
|
pathPos, err := binary.ReadUvarint(reader)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
offset, err := binary.ReadUvarint(reader)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
len, err := binary.ReadUvarint(reader)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return int(pathPos), offset, len, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// appendTag appends the $OFFSET$LEN information to the provided $DIGEST.
|
||||||
|
// The [OFFSET; LEN] points to the variable length data where the file locations
|
||||||
// are stored. $DIGEST has length digestLen stored in the cache file file header.
|
// are stored. $DIGEST has length digestLen stored in the cache file file header.
|
||||||
func generateTag(digest string, offset, len uint64) string {
|
func appendTag(digest []byte, offset, len uint64) ([]byte, error) {
|
||||||
return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
|
digest = binary.LittleEndian.AppendUint64(digest, offset)
|
||||||
|
digest = binary.LittleEndian.AppendUint64(digest, len)
|
||||||
|
return digest, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type setBigData interface {
|
type setBigData interface {
|
||||||
|
@ -320,6 +380,77 @@ type setBigData interface {
|
||||||
SetLayerBigData(id, key string, data io.Reader) error
|
SetLayerBigData(id, key string, data io.Reader) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func bloomFilterFromTags(tags [][]byte, digestLen int) *bloomFilter {
|
||||||
|
bloomFilter := newBloomFilter(len(tags)*bloomFilterScale, bloomFilterHashes)
|
||||||
|
for _, t := range tags {
|
||||||
|
bloomFilter.add(t[:digestLen])
|
||||||
|
}
|
||||||
|
return bloomFilter
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeCacheFileToWriter(writer io.Writer, bloomFilter *bloomFilter, tags [][]byte, tagLen, digestLen int, vdata, fnames bytes.Buffer, tagsBuffer *bytes.Buffer) error {
|
||||||
|
sort.Slice(tags, func(i, j int) bool {
|
||||||
|
return bytes.Compare(tags[i], tags[j]) == -1
|
||||||
|
})
|
||||||
|
for _, t := range tags {
|
||||||
|
if _, err := tagsBuffer.Write(t); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// version
|
||||||
|
if err := binary.Write(writer, binary.LittleEndian, uint64(cacheVersion)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// len of a tag
|
||||||
|
if err := binary.Write(writer, binary.LittleEndian, uint64(tagLen)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// len of a digest
|
||||||
|
if err := binary.Write(writer, binary.LittleEndian, uint64(digestLen)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// bloom filter
|
||||||
|
if err := bloomFilter.writeTo(writer); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// tags length
|
||||||
|
if err := binary.Write(writer, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// vdata length
|
||||||
|
if err := binary.Write(writer, binary.LittleEndian, uint64(vdata.Len())); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// fnames length
|
||||||
|
if err := binary.Write(writer, binary.LittleEndian, uint64(fnames.Len())); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// tags
|
||||||
|
if _, err := writer.Write(tagsBuffer.Bytes()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// variable length data
|
||||||
|
if _, err := writer.Write(vdata.Bytes()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// file names
|
||||||
|
if _, err := writer.Write(fnames.Bytes()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// writeCache write a cache for the layer ID.
|
// writeCache write a cache for the layer ID.
|
||||||
// It generates a sorted list of digests with their offset to the path location and offset.
|
// It generates a sorted list of digests with their offset to the path location and offset.
|
||||||
// The same cache is used to lookup files, chunks and candidates for deduplication with hard links.
|
// The same cache is used to lookup files, chunks and candidates for deduplication with hard links.
|
||||||
|
@ -328,53 +459,98 @@ type setBigData interface {
|
||||||
// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
|
// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
|
||||||
// - digest(i) for each i in chunks(file payload)
|
// - digest(i) for each i in chunks(file payload)
|
||||||
func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*cacheFile, error) {
|
func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*cacheFile, error) {
|
||||||
var vdata bytes.Buffer
|
var vdata, tagsBuffer, fnames bytes.Buffer
|
||||||
tagLen := 0
|
tagLen := 0
|
||||||
digestLen := 0
|
digestLen := 0
|
||||||
var tagsBuffer bytes.Buffer
|
|
||||||
|
|
||||||
toc, err := prepareCacheFile(manifest, format)
|
toc, err := prepareCacheFile(manifest, format)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var tags []string
|
fnamesMap := make(map[string]int)
|
||||||
|
getFileNamePosition := func(name string) (int, error) {
|
||||||
|
if pos, found := fnamesMap[name]; found {
|
||||||
|
return pos, nil
|
||||||
|
}
|
||||||
|
pos := fnames.Len()
|
||||||
|
fnamesMap[name] = pos
|
||||||
|
|
||||||
|
if err := binary.Write(&fnames, binary.LittleEndian, uint32(len(name))); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if _, err := fnames.WriteString(name); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return pos, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var tags [][]byte
|
||||||
for _, k := range toc {
|
for _, k := range toc {
|
||||||
if k.Digest != "" {
|
if k.Digest != "" {
|
||||||
location := generateFileLocation(k.Name, 0, uint64(k.Size))
|
digest, err := makeBinaryDigest(k.Digest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fileNamePos, err := getFileNamePosition(k.Name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
location := generateFileLocation(fileNamePos, 0, uint64(k.Size))
|
||||||
off := uint64(vdata.Len())
|
off := uint64(vdata.Len())
|
||||||
l := uint64(len(location))
|
l := uint64(len(location))
|
||||||
|
|
||||||
d := generateTag(k.Digest, off, l)
|
tag, err := appendTag(digest, off, l)
|
||||||
if tagLen == 0 {
|
if err != nil {
|
||||||
tagLen = len(d)
|
return nil, err
|
||||||
}
|
}
|
||||||
if tagLen != len(d) {
|
if tagLen == 0 {
|
||||||
|
tagLen = len(tag)
|
||||||
|
}
|
||||||
|
if tagLen != len(tag) {
|
||||||
return nil, errors.New("digest with different length found")
|
return nil, errors.New("digest with different length found")
|
||||||
}
|
}
|
||||||
tags = append(tags, d)
|
tags = append(tags, tag)
|
||||||
|
|
||||||
fp, err := calculateHardLinkFingerprint(k)
|
fp, err := calculateHardLinkFingerprint(k)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
d = generateTag(fp, off, l)
|
digestHardLink, err := makeBinaryDigest(fp)
|
||||||
if tagLen != len(d) {
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
tag, err = appendTag(digestHardLink, off, l)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if tagLen != len(tag) {
|
||||||
return nil, errors.New("digest with different length found")
|
return nil, errors.New("digest with different length found")
|
||||||
}
|
}
|
||||||
tags = append(tags, d)
|
tags = append(tags, tag)
|
||||||
|
|
||||||
if _, err := vdata.Write(location); err != nil {
|
if _, err := vdata.Write(location); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
digestLen = len(k.Digest)
|
digestLen = len(digestHardLink)
|
||||||
}
|
}
|
||||||
if k.ChunkDigest != "" {
|
if k.ChunkDigest != "" {
|
||||||
location := generateFileLocation(k.Name, uint64(k.ChunkOffset), uint64(k.ChunkSize))
|
fileNamePos, err := getFileNamePosition(k.Name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
location := generateFileLocation(fileNamePos, uint64(k.ChunkOffset), uint64(k.ChunkSize))
|
||||||
off := uint64(vdata.Len())
|
off := uint64(vdata.Len())
|
||||||
l := uint64(len(location))
|
l := uint64(len(location))
|
||||||
d := generateTag(k.ChunkDigest, off, l)
|
|
||||||
|
digest, err := makeBinaryDigest(k.ChunkDigest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
d, err := appendTag(digest, off, l)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
if tagLen == 0 {
|
if tagLen == 0 {
|
||||||
tagLen = len(d)
|
tagLen = len(d)
|
||||||
}
|
}
|
||||||
|
@ -386,17 +562,11 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
|
||||||
if _, err := vdata.Write(location); err != nil {
|
if _, err := vdata.Write(location); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
digestLen = len(k.ChunkDigest)
|
digestLen = len(digest)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sort.Strings(tags)
|
bloomFilter := bloomFilterFromTags(tags, digestLen)
|
||||||
|
|
||||||
for _, t := range tags {
|
|
||||||
if _, err := tagsBuffer.Write([]byte(t)); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pipeReader, pipeWriter := io.Pipe()
|
pipeReader, pipeWriter := io.Pipe()
|
||||||
errChan := make(chan error, 1)
|
errChan := make(chan error, 1)
|
||||||
|
@ -404,49 +574,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
|
||||||
defer pipeWriter.Close()
|
defer pipeWriter.Close()
|
||||||
defer close(errChan)
|
defer close(errChan)
|
||||||
|
|
||||||
// version
|
errChan <- writeCacheFileToWriter(pipeWriter, bloomFilter, tags, tagLen, digestLen, vdata, fnames, &tagsBuffer)
|
||||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// len of a tag
|
|
||||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// len of a digest
|
|
||||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// tags length
|
|
||||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// vdata length
|
|
||||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// tags
|
|
||||||
if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// variable length data
|
|
||||||
if _, err := pipeWriter.Write(vdata.Bytes()); err != nil {
|
|
||||||
errChan <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
errChan <- nil
|
|
||||||
}()
|
}()
|
||||||
defer pipeReader.Close()
|
defer pipeReader.Close()
|
||||||
|
|
||||||
|
@ -465,17 +593,20 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
|
||||||
logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)
|
logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)
|
||||||
|
|
||||||
return &cacheFile{
|
return &cacheFile{
|
||||||
digestLen: digestLen,
|
digestLen: digestLen,
|
||||||
tagLen: tagLen,
|
tagLen: tagLen,
|
||||||
tags: tagsBuffer.Bytes(),
|
tags: tagsBuffer.Bytes(),
|
||||||
vdata: vdata.Bytes(),
|
vdata: vdata.Bytes(),
|
||||||
|
fnames: fnames.Bytes(),
|
||||||
|
fnamesLen: len(fnames.Bytes()),
|
||||||
|
bloomFilter: bloomFilter,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
|
func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
|
||||||
bigData := bytes.NewReader(bigDataBuffer)
|
bigData := bytes.NewReader(bigDataBuffer)
|
||||||
|
|
||||||
var version, tagLen, digestLen, tagsLen, vdataLen uint64
|
var version, tagLen, digestLen, tagsLen, fnamesLen, vdataLen uint64
|
||||||
if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
|
if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -488,6 +619,12 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
|
||||||
if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
|
if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bloomFilter, err := readBloomFilter(bigData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
|
if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -495,19 +632,28 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := binary.Read(bigData, binary.LittleEndian, &fnamesLen); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
tags := make([]byte, tagsLen)
|
tags := make([]byte, tagsLen)
|
||||||
if _, err := bigData.Read(tags); err != nil {
|
if _, err := bigData.Read(tags); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// retrieve the unread part of the buffer.
|
// retrieve the unread part of the buffer.
|
||||||
vdata := bigDataBuffer[len(bigDataBuffer)-bigData.Len():]
|
remaining := bigDataBuffer[len(bigDataBuffer)-bigData.Len():]
|
||||||
|
|
||||||
|
vdata := remaining[:vdataLen]
|
||||||
|
fnames := remaining[vdataLen:]
|
||||||
|
|
||||||
return &cacheFile{
|
return &cacheFile{
|
||||||
tagLen: int(tagLen),
|
bloomFilter: bloomFilter,
|
||||||
digestLen: int(digestLen),
|
digestLen: int(digestLen),
|
||||||
tags: tags,
|
fnames: fnames,
|
||||||
vdata: vdata,
|
fnamesLen: int(fnamesLen),
|
||||||
|
tagLen: int(tagLen),
|
||||||
|
tags: tags,
|
||||||
|
vdata: vdata,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -574,34 +720,32 @@ func (c *layersCache) createLayer(id string, cacheFile *cacheFile, mmapBuffer []
 	return l, nil
 }
 
-func byteSliceAsString(b []byte) string {
-	return *(*string)(unsafe.Pointer(&b))
-}
-
-func findTag(digest string, cacheFile *cacheFile) (string, uint64, uint64) {
-	if len(digest) != cacheFile.digestLen {
-		return "", 0, 0
-	}
-
+func findBinaryTag(binaryDigest []byte, cacheFile *cacheFile) (bool, uint64, uint64) {
 	nElements := len(cacheFile.tags) / cacheFile.tagLen
 
 	i := sort.Search(nElements, func(i int) bool {
-		d := byteSliceAsString(cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen])
-		return strings.Compare(d, digest) >= 0
+		d := cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen]
+		return bytes.Compare(d, binaryDigest) >= 0
 	})
 	if i < nElements {
-		d := string(cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+len(digest)])
-		if digest == d {
+		d := cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen]
+		if bytes.Equal(binaryDigest, d) {
 			startOff := i*cacheFile.tagLen + cacheFile.digestLen
-			parts := strings.Split(string(cacheFile.tags[startOff:(i+1)*cacheFile.tagLen]), "@")
 
-			off, _ := strconv.ParseInt(parts[0], 10, 64)
+			// check for corrupted data, there must be 2 u64 (off and len) after the digest.
+			if cacheFile.tagLen < cacheFile.digestLen+16 {
+				return false, 0, 0
+			}
 
-			len, _ := strconv.ParseInt(parts[1], 10, 64)
-			return digest, uint64(off), uint64(len)
+			offsetAndLen := cacheFile.tags[startOff : (i+1)*cacheFile.tagLen]
+
+			off := binary.LittleEndian.Uint64(offsetAndLen[:8])
+			len := binary.LittleEndian.Uint64(offsetAndLen[8:16])
+
+			return true, off, len
 		}
 	}
-	return "", 0, 0
+	return false, 0, 0
 }
 
 func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) {
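Note on the new cache tag layout: each tag is now a fixed-width record, the binary digest followed by two little-endian uint64 values (an offset and a length into vdata), and lookups are a plain binary search over those records instead of string splitting. The following standalone sketch illustrates that kind of lookup; the names, sizes, and the main function are illustrative only and are not part of the vendored code.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"sort"
)

// findFixedWidthTag searches a sorted table of fixed-width tags. Each tag is
// digestLen bytes of binary digest followed by two little-endian uint64
// values (offset and length). All names here are hypothetical.
func findFixedWidthTag(tags, wanted []byte, tagLen, digestLen int) (bool, uint64, uint64) {
	if tagLen < digestLen+16 { // need room for the two uint64 values
		return false, 0, 0
	}
	n := len(tags) / tagLen
	i := sort.Search(n, func(i int) bool {
		d := tags[i*tagLen : i*tagLen+digestLen]
		return bytes.Compare(d, wanted) >= 0
	})
	if i < n && bytes.Equal(tags[i*tagLen:i*tagLen+digestLen], wanted) {
		rest := tags[i*tagLen+digestLen : (i+1)*tagLen]
		return true, binary.LittleEndian.Uint64(rest[:8]), binary.LittleEndian.Uint64(rest[8:16])
	}
	return false, 0, 0
}

func main() {
	digestLen, tagLen := 4, 4+16
	tag := make([]byte, tagLen)
	copy(tag, []byte{0xaa, 0xbb, 0xcc, 0xdd})
	binary.LittleEndian.PutUint64(tag[4:12], 42)   // offset
	binary.LittleEndian.PutUint64(tag[12:20], 128) // length
	ok, off, length := findFixedWidthTag(tag, []byte{0xaa, 0xbb, 0xcc, 0xdd}, tagLen, digestLen)
	fmt.Println(ok, off, length) // true 42 128
}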
@@ -609,20 +753,42 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64,
 		return "", "", -1, nil
 	}
 
+	binaryDigest, err := makeBinaryDigest(digest)
+	if err != nil {
+		return "", "", 0, err
+	}
+
 	c.mutex.RLock()
 	defer c.mutex.RUnlock()
 
 	for _, layer := range c.layers {
-		digest, off, tagLen := findTag(digest, layer.cacheFile)
-		if digest != "" {
-			position := string(layer.cacheFile.vdata[off : off+tagLen])
-			parts := strings.SplitN(position, ":", 3)
-			if len(parts) != 3 {
-				continue
+		if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) {
+			continue
+		}
+		found, off, tagLen := findBinaryTag(binaryDigest, layer.cacheFile)
+		if found {
+			if uint64(len(layer.cacheFile.vdata)) < off+tagLen {
+				return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id)
 			}
-			offFile, _ := strconv.ParseInt(parts[0], 10, 64)
+			fileLocationData := layer.cacheFile.vdata[off : off+tagLen]
+
+			fnamePosition, offFile, _, err := parseFileLocation(fileLocationData)
+			if err != nil {
+				return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id)
+			}
+
+			if len(layer.cacheFile.fnames) < fnamePosition+4 {
+				return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id)
+			}
+			lenPath := int(binary.LittleEndian.Uint32(layer.cacheFile.fnames[fnamePosition : fnamePosition+4]))
+
+			if len(layer.cacheFile.fnames) < fnamePosition+lenPath+4 {
+				return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id)
+			}
+			path := string(layer.cacheFile.fnames[fnamePosition+4 : fnamePosition+lenPath+4])
+
 			// parts[1] is the chunk length, currently unused.
-			return layer.target, parts[2], offFile, nil
+			return layer.target, path, int64(offFile), nil
 		}
 	}
 
@@ -657,81 +823,90 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
 	iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
 
 	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
-		if strings.ToLower(field) == "version" {
-			toc.Version = iter.ReadInt()
-			continue
-		}
-		if strings.ToLower(field) != "entries" {
-			iter.Skip()
-			continue
-		}
-		for iter.ReadArray() {
-			var m internal.FileMetadata
-			for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
-				switch strings.ToLower(field) {
+		switch strings.ToLower(field) {
+		case "version":
+			toc.Version = iter.ReadInt()
+
+		case "entries":
+			for iter.ReadArray() {
+				var m internal.FileMetadata
+				for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+					switch strings.ToLower(field) {
 					case "type":
 						m.Type = iter.ReadString()
 					case "name":
 						m.Name = iter.ReadString()
 					case "linkname":
 						m.Linkname = iter.ReadString()
 					case "mode":
 						m.Mode = iter.ReadInt64()
 					case "size":
 						m.Size = iter.ReadInt64()
 					case "uid":
 						m.UID = iter.ReadInt()
 					case "gid":
 						m.GID = iter.ReadInt()
 					case "modtime":
 						time, err := time.Parse(time.RFC3339, iter.ReadString())
 						if err != nil {
 							return nil, err
 						}
 						m.ModTime = &time
 					case "accesstime":
 						time, err := time.Parse(time.RFC3339, iter.ReadString())
 						if err != nil {
 							return nil, err
 						}
 						m.AccessTime = &time
 					case "changetime":
 						time, err := time.Parse(time.RFC3339, iter.ReadString())
 						if err != nil {
 							return nil, err
 						}
 						m.ChangeTime = &time
 					case "devmajor":
 						m.Devmajor = iter.ReadInt64()
 					case "devminor":
 						m.Devminor = iter.ReadInt64()
 					case "digest":
 						m.Digest = iter.ReadString()
 					case "offset":
 						m.Offset = iter.ReadInt64()
 					case "endoffset":
 						m.EndOffset = iter.ReadInt64()
 					case "chunksize":
 						m.ChunkSize = iter.ReadInt64()
 					case "chunkoffset":
 						m.ChunkOffset = iter.ReadInt64()
 					case "chunkdigest":
 						m.ChunkDigest = iter.ReadString()
 					case "chunktype":
 						m.ChunkType = iter.ReadString()
 					case "xattrs":
 						m.Xattrs = make(map[string]string)
 						for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
 							m.Xattrs[key] = iter.ReadString()
 						}
 					default:
 						iter.Skip()
 					}
 				}
+				if m.Type == TypeReg && m.Size == 0 && m.Digest == "" {
+					m.Digest = digestSha256Empty
+				}
+				toc.Entries = append(toc.Entries, m)
+			}
-			if m.Type == TypeReg && m.Size == 0 && m.Digest == "" {
-				m.Digest = digestSha256Empty
-			}
-			toc.Entries = append(toc.Entries, m)
-		}
+
+		case "tarsplitdigest": // strings.ToLower("tarSplitDigest")
+			s := iter.ReadString()
+			d, err := digest.Parse(s)
+			if err != nil {
+				return nil, fmt.Errorf("Invalid tarSplitDigest %q: %w", s, err)
+			}
+			toc.TarSplitDigest = d
+
+		default:
+			iter.Skip()
 		}
 	}
 
@@ -7,7 +7,6 @@ import (
 	"io"
 	"strconv"
 
-	"github.com/containerd/stargz-snapshotter/estargz"
 	"github.com/containers/storage/pkg/chunked/internal"
 	"github.com/klauspost/compress/zstd"
 	"github.com/klauspost/pgzip"
@@ -33,7 +32,7 @@ func typeToTarType(t string) (byte, error) {
 	return r, nil
 }
 
-func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
+func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) {
 	// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
 	footerSize := int64(51)
 	if blobSize <= footerSize {
@@ -126,91 +125,53 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 		return nil, 0, err
 	}
 
-	d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
-	if err != nil {
-		return nil, 0, err
-	}
-	if manifestDigester.Digest() != d {
+	if manifestDigester.Digest() != tocDigest {
 		return nil, 0, errors.New("invalid manifest checksum")
 	}
 
 	return manifestUncompressed, tocOffset, nil
 }
 
-// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
-// be specified.
-// This function uses the io.github.containers.zstd-chunked. annotations when specified.
-func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) {
-	footerSize := int64(internal.FooterSizeSupported)
-	if blobSize <= footerSize {
-		return nil, nil, 0, errors.New("blob too small")
+// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
+// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset).
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
+	offsetMetadata := annotations[internal.ManifestInfoKey]
+	if offsetMetadata == "" {
+		return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
 	}
-
-	var footerData internal.ZstdChunkedFooterData
-
-	if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
-		var err error
-		footerData, err = internal.ReadFooterDataFromAnnotations(annotations)
-		if err != nil {
-			return nil, nil, 0, err
-		}
-	} else {
-		chunk := ImageSourceChunk{
-			Offset: uint64(blobSize - footerSize),
-			Length: uint64(footerSize),
-		}
-		parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
-		if err != nil {
-			return nil, nil, 0, err
-		}
-		var reader io.ReadCloser
-		select {
-		case r := <-parts:
-			reader = r
-		case err := <-errs:
-			return nil, nil, 0, err
-		}
-		footer := make([]byte, footerSize)
-		if _, err := io.ReadFull(reader, footer); err != nil {
-			return nil, nil, 0, err
-		}
-
-		footerData, err = internal.ReadFooterDataFromBlob(footer)
-		if err != nil {
-			return nil, nil, 0, err
+	var manifestChunk ImageSourceChunk
+	var manifestLengthUncompressed, manifestType uint64
+	if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &manifestChunk.Offset, &manifestChunk.Length, &manifestLengthUncompressed, &manifestType); err != nil {
+		return nil, nil, nil, 0, err
+	}
+	// The tarSplit… values are valid if tarSplitChunk.Offset > 0
+	var tarSplitChunk ImageSourceChunk
+	var tarSplitLengthUncompressed uint64
+	if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
+		if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
+			return nil, nil, nil, 0, err
 		}
 	}
 
-	if footerData.ManifestType != internal.ManifestTypeCRFS {
-		return nil, nil, 0, errors.New("invalid manifest type")
+	if manifestType != internal.ManifestTypeCRFS {
+		return nil, nil, nil, 0, errors.New("invalid manifest type")
 	}
 
 	// set a reasonable limit
-	if footerData.LengthCompressed > (1<<20)*50 {
-		return nil, nil, 0, errors.New("manifest too big")
+	if manifestChunk.Length > (1<<20)*50 {
+		return nil, nil, nil, 0, errors.New("manifest too big")
 	}
-	if footerData.LengthUncompressed > (1<<20)*50 {
-		return nil, nil, 0, errors.New("manifest too big")
+	if manifestLengthUncompressed > (1<<20)*50 {
+		return nil, nil, nil, 0, errors.New("manifest too big")
 	}
 
-	chunk := ImageSourceChunk{
-		Offset: footerData.Offset,
-		Length: footerData.LengthCompressed,
-	}
-
-	chunks := []ImageSourceChunk{chunk}
-
-	if footerData.OffsetTarSplit > 0 {
-		chunkTarSplit := ImageSourceChunk{
-			Offset: footerData.OffsetTarSplit,
-			Length: footerData.LengthCompressedTarSplit,
-		}
-		chunks = append(chunks, chunkTarSplit)
+	chunks := []ImageSourceChunk{manifestChunk}
+	if tarSplitChunk.Offset > 0 {
+		chunks = append(chunks, tarSplitChunk)
 	}
 
 	parts, errs, err := blobStream.GetBlobAt(chunks)
 	if err != nil {
-		return nil, nil, 0, err
+		return nil, nil, nil, 0, err
 	}
 
 	readBlob := func(len uint64) ([]byte, error) {
@@ -233,34 +194,39 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
 		return blob, nil
 	}
 
-	manifest, err := readBlob(footerData.LengthCompressed)
+	manifest, err := readBlob(manifestChunk.Length)
 	if err != nil {
-		return nil, nil, 0, err
+		return nil, nil, nil, 0, err
 	}
 
-	decodedBlob, err := decodeAndValidateBlob(manifest, footerData.LengthUncompressed, footerData.ChecksumAnnotation)
+	decodedBlob, err := decodeAndValidateBlob(manifest, manifestLengthUncompressed, tocDigest.String())
 	if err != nil {
-		return nil, nil, 0, err
+		return nil, nil, nil, 0, fmt.Errorf("validating and decompressing TOC: %w", err)
 	}
+	toc, err := unmarshalToc(decodedBlob)
+	if err != nil {
+		return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
+	}
+
 	decodedTarSplit := []byte{}
-	if footerData.OffsetTarSplit > 0 {
-		tarSplit, err := readBlob(footerData.LengthCompressedTarSplit)
+	if tarSplitChunk.Offset > 0 {
+		tarSplit, err := readBlob(tarSplitChunk.Length)
 		if err != nil {
-			return nil, nil, 0, err
+			return nil, nil, nil, 0, err
 		}
 
-		decodedTarSplit, err = decodeAndValidateBlob(tarSplit, footerData.LengthUncompressedTarSplit, footerData.ChecksumAnnotationTarSplit)
+		decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String())
 		if err != nil {
-			return nil, nil, 0, err
+			return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err)
 		}
 	}
-	return decodedBlob, decodedTarSplit, int64(footerData.Offset), err
+	return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err
 }
 
 func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
 	d, err := digest.Parse(expectedCompressedChecksum)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err)
 	}
 
 	blobDigester := d.Algorithm().Digester()
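Note on the annotation-driven manifest location: readZstdChunkedManifest now requires the io.github.containers.zstd-chunked.manifest-position annotation (offset, compressed length, uncompressed length and manifest type as a "%d:%d:%d:%d" string) and optionally the tarsplit-position annotation ("%d:%d:%d"), instead of falling back to reading the binary footer from the blob. A small self-contained sketch of that kind of parsing follows; the type and function names are illustrative, not the vendored API.

package main

import "fmt"

// manifestPosition mirrors the fields encoded in a manifest-position style
// annotation; the struct name and field names are hypothetical.
type manifestPosition struct {
	Offset, LengthCompressed, LengthUncompressed, ManifestType uint64
}

func parseManifestPosition(v string) (manifestPosition, error) {
	var p manifestPosition
	if _, err := fmt.Sscanf(v, "%d:%d:%d:%d", &p.Offset, &p.LengthCompressed, &p.LengthUncompressed, &p.ManifestType); err != nil {
		return manifestPosition{}, fmt.Errorf("parsing manifest position %q: %w", v, err)
	}
	return p, nil
}

func main() {
	p, err := parseManifestPosition("1024:512:2048:1")
	fmt.Println(p, err) // {1024 512 2048 1} <nil>
}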
@@ -52,7 +52,7 @@ func escaped(val string, escape int) string {
 		if noescapeSpace {
 			hexEscape = !unicode.IsPrint(rune(c))
 		} else {
-			hexEscape = !unicode.IsGraphic(rune(c))
+			hexEscape = !unicode.IsPrint(rune(c)) || unicode.IsSpace(rune(c))
 		}
 	}
 
@@ -8,7 +8,6 @@ import (
 	"archive/tar"
 	"bytes"
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"io"
 	"time"
@@ -19,8 +18,9 @@ import (
 )
 
 type TOC struct {
 	Version int            `json:"version"`
 	Entries []FileMetadata `json:"entries"`
+	TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
 }
 
 type FileMetadata struct {
@@ -85,9 +85,10 @@ func GetType(t byte) (string, error) {
 const (
 	ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
 	ManifestInfoKey     = "io.github.containers.zstd-chunked.manifest-position"
-	TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
 	TarSplitInfoKey     = "io.github.containers.zstd-chunked.tarsplit-position"
 
+	TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" // Deprecated: Use the TOC.TarSplitDigest field instead, this annotation is no longer read nor written.
+
 	// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
 	ManifestTypeCRFS = 1
@@ -134,8 +135,9 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 	manifestOffset := offset + zstdSkippableFrameHeader
 
 	toc := TOC{
 		Version: 1,
 		Entries: metadata,
+		TarSplitDigest: tarSplitData.Digest,
 	}
 
 	json := jsoniter.ConfigCompatibleWithStandardLibrary
@@ -171,7 +173,6 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 		return err
 	}
 
-	outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String()
 	tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader
 	outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize)
 	if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil {
@@ -183,11 +184,9 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 		Offset:                     manifestOffset,
 		LengthCompressed:           uint64(len(compressedManifest)),
 		LengthUncompressed:         uint64(len(manifest)),
-		ChecksumAnnotation:         "", // unused
 		OffsetTarSplit:             uint64(tarSplitOffset),
 		LengthCompressedTarSplit:   uint64(len(tarSplitData.Data)),
 		LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
-		ChecksumAnnotationTarSplit: "", // unused
 	}
 
 	manifestDataLE := footerDataToBlob(footer)
@@ -201,18 +200,22 @@ func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
 }
 
 // ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer.
+// This footer exists to make the blobs self-describing, our implementation
+// never reads it:
+// Partial pull security hinges on the TOC digest, and that exists as a layer annotation;
+// so we are relying on the layer annotations anyway, and doing so means we can avoid
+// a round-trip to fetch this binary footer.
 type ZstdChunkedFooterData struct {
 	ManifestType uint64
 
 	Offset             uint64
 	LengthCompressed   uint64
 	LengthUncompressed uint64
-	ChecksumAnnotation string // Only used when reading a layer, not when creating it
 
 	OffsetTarSplit             uint64
 	LengthCompressedTarSplit   uint64
 	LengthUncompressedTarSplit uint64
-	ChecksumAnnotationTarSplit string // Only used when reading a layer, not when creating it
+	ChecksumAnnotationTarSplit string // Deprecated: This field is not a part of the footer and not used for any purpose.
 }
 
 func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
@@ -229,49 +232,3 @@ func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
 
 	return manifestDataLE
 }
-
-// ReadFooterDataFromAnnotations reads the zstd:chunked footer data from the given annotations.
-func ReadFooterDataFromAnnotations(annotations map[string]string) (ZstdChunkedFooterData, error) {
-	var footerData ZstdChunkedFooterData
-
-	footerData.ChecksumAnnotation = annotations[ManifestChecksumKey]
-	if footerData.ChecksumAnnotation == "" {
-		return footerData, fmt.Errorf("manifest checksum annotation %q not found", ManifestChecksumKey)
-	}
-
-	offsetMetadata := annotations[ManifestInfoKey]
-
-	if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &footerData.Offset, &footerData.LengthCompressed, &footerData.LengthUncompressed, &footerData.ManifestType); err != nil {
-		return footerData, err
-	}
-
-	if tarSplitInfoKeyAnnotation, found := annotations[TarSplitInfoKey]; found {
-		if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &footerData.OffsetTarSplit, &footerData.LengthCompressedTarSplit, &footerData.LengthUncompressedTarSplit); err != nil {
-			return footerData, err
-		}
-		footerData.ChecksumAnnotationTarSplit = annotations[TarSplitChecksumKey]
-	}
-	return footerData, nil
-}
-
-// ReadFooterDataFromBlob reads the zstd:chunked footer from the binary buffer.
-func ReadFooterDataFromBlob(footer []byte) (ZstdChunkedFooterData, error) {
-	var footerData ZstdChunkedFooterData
-
-	if len(footer) < FooterSizeSupported {
-		return footerData, errors.New("blob too small")
-	}
-	footerData.Offset = binary.LittleEndian.Uint64(footer[0:8])
-	footerData.LengthCompressed = binary.LittleEndian.Uint64(footer[8:16])
-	footerData.LengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
-	footerData.ManifestType = binary.LittleEndian.Uint64(footer[24:32])
-	footerData.OffsetTarSplit = binary.LittleEndian.Uint64(footer[32:40])
-	footerData.LengthCompressedTarSplit = binary.LittleEndian.Uint64(footer[40:48])
-	footerData.LengthUncompressedTarSplit = binary.LittleEndian.Uint64(footer[48:56])
-
-	// the magic number is stored in the last 8 bytes
-	if !bytes.Equal(ZstdChunkedFrameMagic, footer[len(footer)-len(ZstdChunkedFrameMagic):]) {
-		return footerData, errors.New("invalid magic number")
-	}
-	return footerData, nil
-}
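Note on the tar-split checksum move: the digest of the tar-split stream now travels inside the already-verified TOC (TOC.TarSplitDigest) rather than in the TarSplitChecksumKey annotation, so a reader validates the fetched tar-split blob against a value taken from trusted data. A minimal illustrative check against an expected digest, using github.com/opencontainers/go-digest directly (not the vendored helpers), could look like this:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// validateAgainstDigest recomputes the digest of data and compares it with
// the expected value taken from a trusted, already-verified structure.
func validateAgainstDigest(data []byte, expected digest.Digest) error {
	if err := expected.Validate(); err != nil {
		return fmt.Errorf("invalid digest %q: %w", expected, err)
	}
	actual := expected.Algorithm().FromBytes(data)
	if actual != expected {
		return fmt.Errorf("digest mismatch: got %s, want %s", actual, expected)
	}
	return nil
}

func main() {
	data := []byte("tar-split payload")
	expected := digest.Canonical.FromBytes(data)
	fmt.Println(validateAgainstDigest(data, expected)) // <nil>
}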
@@ -25,6 +25,7 @@ import (
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chunked/compressor"
 	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/toc"
 	"github.com/containers/storage/pkg/fsverity"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/system"
@@ -78,6 +79,7 @@ type compressedFileType int
 type chunkedDiffer struct {
 	stream   ImageSourceSeekable
 	manifest []byte
+	toc      *internal.TOC // The parsed contents of manifest, or nil if not yet available
 	tarSplit []byte
 	layersCache *layersCache
 	tocOffset   int64
@@ -216,15 +218,15 @@ func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser,
 	return streams, errs, nil
 }
 
-func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableFile, digest.Digest, map[string]string, error) {
+func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
 	diff, err := archive.DecompressStream(payload)
 	if err != nil {
-		return nil, "", nil, err
+		return 0, nil, "", nil, err
 	}
 
 	fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
 	if err != nil {
-		return nil, "", nil, err
+		return 0, nil, "", nil, err
 	}
 
 	f := os.NewFile(uintptr(fd), destDirectory)
@@ -234,23 +236,24 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableF
 	chunked, err := compressor.ZstdCompressor(f, newAnnotations, &level)
 	if err != nil {
 		f.Close()
-		return nil, "", nil, err
+		return 0, nil, "", nil, err
 	}
 
 	convertedOutputDigester := digest.Canonical.Digester()
-	if _, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff); err != nil {
+	copied, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff)
+	if err != nil {
 		f.Close()
-		return nil, "", nil, err
+		return 0, nil, "", nil, err
 	}
 	if err := chunked.Close(); err != nil {
 		f.Close()
-		return nil, "", nil, err
+		return 0, nil, "", nil, err
 	}
 	is := seekableFile{
 		file: f,
 	}
 
-	return &is, convertedOutputDigester.Digest(), newAnnotations, nil
+	return copied, &is, convertedOutputDigester.Digest(), newAnnotations, nil
 }
 
 // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
@@ -264,18 +267,26 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
 		return nil, errors.New("enable_partial_images not configured")
 	}
 
-	_, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
-	_, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
+	zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
+	estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
 
 	if hasZstdChunkedTOC && hasEstargzTOC {
 		return nil, errors.New("both zstd:chunked and eStargz TOC found")
 	}
 
 	if hasZstdChunkedTOC {
-		return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
+		zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
+		if err != nil {
+			return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err)
+		}
+		return makeZstdChunkedDiffer(ctx, store, blobSize, zstdChunkedTOCDigest, annotations, iss, &storeOpts)
 	}
 	if hasEstargzTOC {
-		return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
+		estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
+		if err != nil {
+			return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err)
+		}
+		return makeEstargzChunkedDiffer(ctx, store, blobSize, estargzTOCDigest, iss, &storeOpts)
 	}
 
 	return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts)
@@ -303,8 +314,8 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige
 	}, nil
 }
 
-func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
-	manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations)
+func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
+	manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
 	if err != nil {
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
@@ -313,11 +324,6 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
 		return nil, err
 	}
 
-	tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
-	if err != nil {
-		return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err)
-	}
-
 	return &chunkedDiffer{
 		fsVerityDigests: make(map[string]string),
 		blobSize:        blobSize,
@@ -326,6 +332,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
 		fileType:    fileTypeZstdChunked,
 		layersCache: layersCache,
 		manifest:    manifest,
+		toc:         toc,
 		storeOpts:   storeOpts,
 		stream:      iss,
 		tarSplit:    tarSplit,
@@ -333,8 +340,8 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
 	}, nil
 }
 
-func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
-	manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations)
+func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
+	manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
 	if err != nil {
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
@@ -343,11 +350,6 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
 		return nil, err
 	}
 
-	tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
-	if err != nil {
-		return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err)
-	}
-
 	return &chunkedDiffer{
 		fsVerityDigests: make(map[string]string),
 		blobSize:        blobSize,
@@ -1653,6 +1655,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	stream := c.stream
 
 	var uncompressedDigest digest.Digest
+	var convertedBlobSize int64
 
 	if c.convertToZstdChunked {
 		fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
@@ -1680,10 +1683,11 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
 
-		fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile)
+		tarSize, fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile)
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
+		convertedBlobSize = tarSize
 		// fileSource is a O_TMPFILE file descriptor, so we
 		// need to keep it open until the entire file is processed.
 		defer fileSource.Close()
@@ -1692,7 +1696,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		blobFile.Close()
 		blobFile = nil
 
-		manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations)
+		tocDigest, err := toc.GetTOCDigest(annotations)
+		if err != nil {
+			return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: parsing just-created zstd:chunked TOC digest: %w", err)
+		}
+		if tocDigest == nil {
+			return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest")
+		}
+		manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations)
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err)
 		}
@@ -1703,6 +1714,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		// fill the chunkedDiffer with the data we just read.
 		c.fileType = fileTypeZstdChunked
 		c.manifest = manifest
+		c.toc = toc
 		c.tarSplit = tarSplit
 		c.tocOffset = tocOffset
 
@@ -1723,9 +1735,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	}
 
 	// Generate the manifest
-	toc, err := unmarshalToc(c.manifest)
-	if err != nil {
-		return graphdriver.DriverWithDifferOutput{}, err
+	toc := c.toc
+	if toc == nil {
+		toc_, err := unmarshalToc(c.manifest)
+		if err != nil {
+			return graphdriver.DriverWithDifferOutput{}, err
+		}
+		toc = toc_
 	}
 
 	output := graphdriver.DriverWithDifferOutput{
@@ -1753,13 +1769,19 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 
 	var missingParts []missingPart
 
-	mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries)
+	mergedEntries, totalSizeFromTOC, err := c.mergeTocEntries(c.fileType, toc.Entries)
 	if err != nil {
 		return output, err
 	}
 
 	output.UIDs, output.GIDs = collectIDs(mergedEntries)
-	output.Size = totalSize
+	if convertedBlobSize > 0 {
+		// if the image was converted, store the original tar size, so that
+		// it can be recreated correctly.
+		output.Size = convertedBlobSize
+	} else {
+		output.Size = totalSizeFromTOC
+	}
 
 	if err := maybeDoIDRemap(mergedEntries, options); err != nil {
 		return output, err
@@ -5,72 +5,6 @@ import (
 	"os"
 )
 
-// ThinpoolOptionsConfig represents the "storage.options.thinpool"
-// TOML config table.
-type ThinpoolOptionsConfig struct {
-	// AutoExtendPercent determines the amount by which pool needs to be
-	// grown. This is specified in terms of % of pool size. So a value of
-	// 20 means that when threshold is hit, pool will be grown by 20% of
-	// existing pool size.
-	AutoExtendPercent string `toml:"autoextend_percent,omitempty"`
-
-	// AutoExtendThreshold determines the pool extension threshold in terms
-	// of percentage of pool size. For example, if threshold is 60, that
-	// means when pool is 60% full, threshold has been hit.
-	AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"`
-
-	// BaseSize specifies the size to use when creating the base device,
-	// which limits the size of images and containers.
-	BaseSize string `toml:"basesize,omitempty"`
-
-	// BlockSize specifies a custom blocksize to use for the thin pool.
-	BlockSize string `toml:"blocksize,omitempty"`
-
-	// DirectLvmDevice specifies a custom block storage device to use for
-	// the thin pool.
-	DirectLvmDevice string `toml:"directlvm_device,omitempty"`
-
-	// DirectLvmDeviceForcewipes device even if device already has a
-	// filesystem
-	DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"`
-
-	// Fs specifies the filesystem type to use for the base device.
-	Fs string `toml:"fs,omitempty"`
-
-	// log_level sets the log level of devicemapper.
-	LogLevel string `toml:"log_level,omitempty"`
-
-	// MetadataSize specifies the size of the metadata for the thinpool
-	// It will be used with the `pvcreate --metadata` option.
-	MetadataSize string `toml:"metadatasize,omitempty"`
-
-	// MinFreeSpace specifies the min free space percent in a thin pool
-	// require for new device creation to
-	MinFreeSpace string `toml:"min_free_space,omitempty"`
-
-	// MkfsArg specifies extra mkfs arguments to be used when creating the
-	// basedevice.
-	MkfsArg string `toml:"mkfsarg,omitempty"`
-
-	// MountOpt specifies extra mount options used when mounting the thin
-	// devices.
-	MountOpt string `toml:"mountopt,omitempty"`
-
-	// Size
-	Size string `toml:"size,omitempty"`
-
-	// UseDeferredDeletion marks device for deferred deletion
-	UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"`
-
-	// UseDeferredRemoval marks device for deferred removal
-	UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"`
-
-	// XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of
-	// retries XFS should attempt to complete IO when ENOSPC (no space)
-	// error is returned by underlying storage device.
-	XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"`
-}
-
 type AufsOptionsConfig struct {
 	// MountOpt specifies extra mount options used when mounting
 	MountOpt string `toml:"mountopt,omitempty"`
@@ -181,8 +115,8 @@ type OptionsConfig struct {
 	// Btrfs container options to be handed to btrfs drivers
 	Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"`
 
-	// Thinpool container options to be handed to thinpool drivers
-	Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"`
+	// Thinpool container options to be handed to thinpool drivers (NOP)
+	Thinpool struct{} `toml:"thinpool,omitempty"`
 
 	// Overlay container options to be handed to overlay drivers
 	Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"`
@@ -231,62 +165,6 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
 			doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
 		}
 
-	case "devicemapper":
-		if options.Thinpool.AutoExtendPercent != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent))
-		}
-		if options.Thinpool.AutoExtendThreshold != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold))
-		}
-		if options.Thinpool.BaseSize != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize))
-		}
-		if options.Thinpool.BlockSize != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize))
-		}
-		if options.Thinpool.DirectLvmDevice != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice))
-		}
-		if options.Thinpool.DirectLvmDeviceForce != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce))
-		}
-		if options.Thinpool.Fs != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs))
-		}
-		if options.Thinpool.LogLevel != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel))
-		}
-		if options.Thinpool.MetadataSize != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize))
-		}
-		if options.Thinpool.MinFreeSpace != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace))
-		}
-		if options.Thinpool.MkfsArg != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg))
-		}
-		if options.Thinpool.MountOpt != "" {
-			doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt))
-		} else if options.MountOpt != "" {
-			doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
-		}
-
-		if options.Thinpool.Size != "" {
-			doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size))
-		} else if options.Size != "" {
-			doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
-		}
-
-		if options.Thinpool.UseDeferredDeletion != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion))
-		}
-		if options.Thinpool.UseDeferredRemoval != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval))
-		}
-		if options.Thinpool.XfsNoSpaceMaxRetries != "" {
-			doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries))
-		}
-
 	case "overlay", "overlay2":
 		// Specify whether composefs must be used to mount the data layers
 		if options.Overlay.IgnoreChownErrors != "" {
Some files were not shown because too many files have changed in this diff.