2013-11-05 19:16:36 +08:00
|
|
|
|
package sftp
|
|
|
|
|
|
|
|
|
|
import (
|
2015-05-23 15:10:22 +08:00
|
|
|
|
"bytes"
|
|
|
|
|
"encoding/binary"
|
2013-11-05 19:16:36 +08:00
|
|
|
|
"io"
|
2021-01-23 00:45:57 +08:00
|
|
|
|
"math"
|
2013-11-05 19:16:36 +08:00
|
|
|
|
"os"
|
2013-11-06 12:40:35 +08:00
|
|
|
|
"path"
|
2020-08-19 19:09:42 +08:00
|
|
|
|
"sync"
|
2015-05-23 15:10:22 +08:00
|
|
|
|
"sync/atomic"
|
2018-04-25 23:11:13 +08:00
|
|
|
|
"syscall"
|
2013-12-11 07:18:53 +08:00
|
|
|
|
"time"
|
2013-11-05 19:16:36 +08:00
|
|
|
|
|
2013-11-11 09:57:03 +08:00
|
|
|
|
"github.com/kr/fs"
|
2016-05-19 13:16:48 +08:00
|
|
|
|
"github.com/pkg/errors"
|
2015-09-07 12:54:42 +08:00
|
|
|
|
"golang.org/x/crypto/ssh"
|
2013-11-05 19:16:36 +08:00
|
|
|
|
)
|
|
|
|
|
|
2019-08-30 23:04:37 +08:00
|
|
|
|
var (
	// ErrInternalInconsistency indicates the packets sent and the data queued to be
	// written to the file don't match up. It is an unusual error and usually is
	// caused by bad behavior server side or connection issues. The error is
	// limited in scope to the call where it happened, the client object is still
	// OK to use as long as the connection is still open.
	ErrInternalInconsistency = errors.New("internal inconsistency")

	// InternalInconsistency alias for ErrInternalInconsistency.
	//
	// Deprecated: please use ErrInternalInconsistency
	InternalInconsistency = ErrInternalInconsistency
)
|
2017-07-26 12:04:32 +08:00
|
|
|
|
|
2017-07-24 08:47:38 +08:00
|
|
|
|
// A ClientOption is a function which applies configuration to a Client.
// Options are passed to NewClient / NewClientPipe and are applied in order.
type ClientOption func(*Client) error
|
|
|
|
|
|
2018-01-16 03:12:47 +08:00
|
|
|
|
// MaxPacketChecked sets the maximum size of the payload, measured in bytes.
|
|
|
|
|
// This option only accepts sizes servers should support, ie. <= 32768 bytes.
|
2017-07-24 07:40:34 +08:00
|
|
|
|
//
|
2018-01-16 03:12:47 +08:00
|
|
|
|
// If you get the error "failed to send packet header: EOF" when copying a
|
|
|
|
|
// large file, try lowering this number.
|
|
|
|
|
//
|
|
|
|
|
// The default packet size is 32768 bytes.
|
|
|
|
|
func MaxPacketChecked(size int) ClientOption {
|
2015-05-23 19:53:59 +08:00
|
|
|
|
return func(c *Client) error {
|
2018-01-16 03:12:47 +08:00
|
|
|
|
if size < 1 {
|
|
|
|
|
return errors.Errorf("size must be greater or equal to 1")
|
2015-05-23 19:53:59 +08:00
|
|
|
|
}
|
2018-01-16 03:12:47 +08:00
|
|
|
|
if size > 32768 {
|
|
|
|
|
return errors.Errorf("sizes larger than 32KB might not work with all servers")
|
2017-07-24 07:40:34 +08:00
|
|
|
|
}
|
2015-05-23 19:53:59 +08:00
|
|
|
|
c.maxPacket = size
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-16 03:12:47 +08:00
|
|
|
|
// MaxPacketUnchecked sets the maximum size of the payload, measured in bytes.
|
|
|
|
|
// It accepts sizes larger than the 32768 bytes all servers should support.
|
|
|
|
|
// Only use a setting higher than 32768 if your application always connects to
|
|
|
|
|
// the same server or after sufficiently broad testing.
|
|
|
|
|
//
|
|
|
|
|
// If you get the error "failed to send packet header: EOF" when copying a
|
|
|
|
|
// large file, try lowering this number.
|
|
|
|
|
//
|
|
|
|
|
// The default packet size is 32768 bytes.
|
|
|
|
|
func MaxPacketUnchecked(size int) ClientOption {
|
|
|
|
|
return func(c *Client) error {
|
|
|
|
|
if size < 1 {
|
|
|
|
|
return errors.Errorf("size must be greater or equal to 1")
|
|
|
|
|
}
|
|
|
|
|
c.maxPacket = size
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// MaxPacket sets the maximum size of the payload, measured in bytes.
// This option only accepts sizes servers should support, ie. <= 32768 bytes.
// This is a synonym for MaxPacketChecked that provides backward compatibility.
//
// If you get the error "failed to send packet header: EOF" when copying a
// large file, try lowering this number.
//
// The default packet size is 32768 bytes.
func MaxPacket(size int) ClientOption {
	return MaxPacketChecked(size)
}
|
|
|
|
|
|
2018-05-25 21:43:09 +08:00
|
|
|
|
// MaxConcurrentRequestsPerFile sets the maximum concurrent requests allowed for a single file.
|
|
|
|
|
//
|
|
|
|
|
// The default maximum concurrent requests is 64.
|
|
|
|
|
func MaxConcurrentRequestsPerFile(n int) ClientOption {
|
|
|
|
|
return func(c *Client) error {
|
|
|
|
|
if n < 1 {
|
|
|
|
|
return errors.Errorf("n must be greater or equal to 1")
|
|
|
|
|
}
|
|
|
|
|
c.maxConcurrentRequests = n
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-22 04:32:09 +08:00
|
|
|
|
// UseConcurrentWrites allows the Client to perform concurrent Writes.
|
|
|
|
|
//
|
|
|
|
|
// Using concurrency while doing writes, requires special consideration.
|
|
|
|
|
// A write to a later offset in a file after an error,
|
|
|
|
|
// could end up with a file length longer than what was successfully written.
|
|
|
|
|
//
|
|
|
|
|
// When using this option, if you receive an error during `io.Copy` or `io.WriteTo`,
|
|
|
|
|
// you may need to `Truncate` the target Writer to avoid “holes” in the data written.
|
|
|
|
|
func UseConcurrentWrites(value bool) ClientOption {
|
|
|
|
|
return func(c *Client) error {
|
|
|
|
|
c.useConcurrentWrites = value
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// UseFstat sets whether to use Fstat or Stat when File.WriteTo is called
// (usually when copying files).
// Some servers limit the amount of open files and calling Stat after opening
// the file will throw an error From the server. Setting this flag will call
// Fstat instead of Stat which is supposed to be called on an open file handle.
//
// It has been found that with IBM Sterling SFTP servers which have
// "extractability" level set to 1 which means only 1 file can be opened at
// any given time.
//
// If the server you are working with still has an issue with both Stat and
// Fstat calls you can always open a file and read it until the end.
//
// Another reason to read the file until its end and Fstat doesn't work is
// that in some servers, reading a full file will automatically delete the
// file as some of these mainframes map the file to a message in a queue.
// Once the file has been read it will get deleted.
func UseFstat(value bool) ClientOption {
	return func(c *Client) error {
		c.useFstat = value
		return nil
	}
}
|
|
|
|
|
|
|
|
|
|
// Client represents an SFTP session on a *ssh.ClientConn SSH connection.
// Multiple Clients can be active on a single SSH connection, and a Client
// may be called concurrently from multiple Goroutines.
//
// Client implements the github.com/kr/fs.FileSystem interface.
type Client struct {
	clientConn

	ext map[string]string // Extensions (name -> data).

	maxPacket             int    // max packet size read or written.
	maxConcurrentRequests int    // max concurrent requests allowed for a single file.
	nextid                uint32 // request id counter; accessed atomically via nextID().

	// write concurrency is… error prone.
	// Default behavior should be to not use it.
	useConcurrentWrites bool
	useFstat            bool // use Fstat instead of Stat when File.WriteTo is called.
}
|
|
|
|
|
|
2016-01-08 04:11:16 +08:00
|
|
|
|
// NewClient creates a new SFTP client on conn, using zero or more option
|
|
|
|
|
// functions.
|
2017-07-24 08:47:38 +08:00
|
|
|
|
func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) {
|
2013-11-05 19:16:36 +08:00
|
|
|
|
s, err := conn.NewSession()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
if err := s.RequestSubsystem("sftp"); err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
pw, err := s.StdinPipe()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
pr, err := s.StdoutPipe()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
2014-10-10 02:49:08 +08:00
|
|
|
|
|
2015-05-23 19:53:59 +08:00
|
|
|
|
return NewClientPipe(pr, pw, opts...)
|
2013-11-05 19:16:36 +08:00
|
|
|
|
}
|
|
|
|
|
|
2014-09-30 06:43:45 +08:00
|
|
|
|
// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser.
// This can be used for connecting to an SFTP server over TCP/TLS or by using
// the system's ssh client program (e.g. via exec.Command).
func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) {
	sftp := &Client{
		clientConn: clientConn{
			conn: conn{
				Reader:      rd,
				WriteCloser: wr,
			},
			inflight: make(map[uint32]chan<- result),
			closed:   make(chan struct{}),
		},

		ext: make(map[string]string),

		maxPacket:             1 << 15, // 32768: the default packet size.
		maxConcurrentRequests: 64,
	}

	// Apply options before the handshake so they can influence it.
	for _, opt := range opts {
		if err := opt(sftp); err != nil {
			wr.Close()
			return nil, err
		}
	}

	// Perform the SFTP version handshake; on failure close the write
	// side so the peer sees EOF.
	if err := sftp.sendInit(); err != nil {
		wr.Close()
		return nil, err
	}
	if err := sftp.recvVersion(); err != nil {
		wr.Close()
		return nil, err
	}

	// Start the response-dispatch loop; wg lets Close wait for it.
	sftp.clientConn.wg.Add(1)
	go sftp.loop()

	return sftp, nil
}
|
|
|
|
|
|
2018-02-07 06:03:47 +08:00
|
|
|
|
// Create creates the named file mode 0666 (before umask), truncating it if it
// already exists. If successful, methods on the returned File can be used for
// I/O; the associated file descriptor has mode O_RDWR. If you need more
// control over the flags/mode used to open the file see client.OpenFile.
//
// Note that some SFTP servers (eg. AWS Transfer) do not support opening files
// read/write at the same time. For those services you will need to use
// `client.OpenFile(os.O_WRONLY|os.O_CREATE|os.O_TRUNC)`.
func (c *Client) Create(path string) (*File, error) {
	return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC))
}
|
|
|
|
|
|
2014-10-10 02:45:30 +08:00
|
|
|
|
// sftpProtocolVersion is the only protocol version this client speaks.
const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
|
|
|
|
|
|
2013-11-05 19:16:36 +08:00
|
|
|
|
// sendInit sends the SSH_FXP_INIT packet announcing the protocol version
// this client speaks; the first half of the version handshake.
func (c *Client) sendInit() error {
	return c.clientConn.conn.sendPacket(sshFxInitPacket{
		Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
	})
}
|
|
|
|
|
|
2015-05-23 15:10:22 +08:00
|
|
|
|
// nextID returns the next value of c.nextid.
// Safe for concurrent use: the counter is advanced atomically.
func (c *Client) nextID() uint32 {
	return atomic.AddUint32(&c.nextid, 1)
}
|
|
|
|
|
|
|
|
|
|
// recvVersion reads the server's SSH_FXP_VERSION reply (the second half of
// the version handshake), verifies that the advertised protocol version
// matches sftpProtocolVersion, and records any extension pairs in c.ext.
func (c *Client) recvVersion() error {
	typ, data, err := c.recvPacket(0)
	if err != nil {
		return err
	}
	if typ != sshFxpVersion {
		return &unexpectedPacketErr{sshFxpVersion, typ}
	}

	version, data := unmarshalUint32(data)
	if version != sftpProtocolVersion {
		return &unexpectedVersionErr{sftpProtocolVersion, version}
	}

	// Any remaining payload is a sequence of extension (name, data) pairs.
	for len(data) > 0 {
		var ext extensionPair
		ext, data, err = unmarshalExtensionPair(data)
		if err != nil {
			return err
		}
		c.ext[ext.Name] = ext.Data
	}

	return nil
}
|
|
|
|
|
|
2020-10-11 18:42:49 +08:00
|
|
|
|
// HasExtension checks whether the server supports a named extension.
|
|
|
|
|
//
|
|
|
|
|
// The first return value is the extension data reported by the server
|
|
|
|
|
// (typically a version number).
|
|
|
|
|
func (c *Client) HasExtension(name string) (string, bool) {
|
|
|
|
|
data, ok := c.ext[name]
|
|
|
|
|
return data, ok
|
|
|
|
|
}
|
|
|
|
|
|
2013-11-05 19:16:36 +08:00
|
|
|
|
// Walk returns a new Walker rooted at root.
// The Client itself serves as the fs.FileSystem implementation.
func (c *Client) Walk(root string) *fs.Walker {
	return fs.WalkFS(root, c)
}
|
|
|
|
|
|
2013-11-07 14:23:51 +08:00
|
|
|
|
// ReadDir reads the directory named by dirname and returns a list of
// directory entries. Entries for "." and ".." are omitted. It issues
// SSH_FXP_READDIR requests until the server signals end-of-listing with
// an SSH_FXP_STATUS (normally io.EOF, which is translated to nil).
func (c *Client) ReadDir(p string) ([]os.FileInfo, error) {
	handle, err := c.opendir(p)
	if err != nil {
		return nil, err
	}
	defer c.close(handle) // this has to defer earlier than the lock below
	var attrs []os.FileInfo
	var done = false
	for !done {
		id := c.nextID()
		typ, data, err1 := c.sendPacket(nil, sshFxpReaddirPacket{
			ID:     id,
			Handle: handle,
		})
		if err1 != nil {
			err = err1
			done = true
			break
		}
		switch typ {
		case sshFxpName:
			// Response: id, count, then count (filename, longname, attrs) triples.
			sid, data := unmarshalUint32(data)
			if sid != id {
				return nil, &unexpectedIDErr{id, sid}
			}
			count, data := unmarshalUint32(data)
			for i := uint32(0); i < count; i++ {
				var filename string
				filename, data = unmarshalString(data)
				_, data = unmarshalString(data) // discard longname
				var attr *FileStat
				attr, data = unmarshalAttrs(data)
				if filename == "." || filename == ".." {
					continue
				}
				attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename)))
			}
		case sshFxpStatus:
			// TODO(dfc) scope warning!
			err = normaliseError(unmarshalStatus(id, data))
			done = true
		default:
			return nil, unimplementedPacketErr(typ)
		}
	}
	// io.EOF marks the normal end of the listing, not a failure.
	if err == io.EOF {
		err = nil
	}
	return attrs, err
}
|
2015-12-22 05:45:40 +08:00
|
|
|
|
|
2013-11-05 19:16:36 +08:00
|
|
|
|
// opendir issues an SSH_FXP_OPENDIR request and returns the server-assigned
// directory handle, which must later be released with c.close.
func (c *Client) opendir(path string) (string, error) {
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpOpendirPacket{
		ID:   id,
		Path: path,
	})
	if err != nil {
		return "", err
	}
	switch typ {
	case sshFxpHandle:
		sid, data := unmarshalUint32(data)
		if sid != id {
			return "", &unexpectedIDErr{id, sid}
		}
		handle, _ := unmarshalString(data)
		return handle, nil
	case sshFxpStatus:
		return "", normaliseError(unmarshalStatus(id, data))
	default:
		return "", unimplementedPacketErr(typ)
	}
}
|
|
|
|
|
|
2015-09-08 14:04:52 +08:00
|
|
|
|
// Stat returns a FileInfo structure describing the file specified by path 'p'.
// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file.
func (c *Client) Stat(p string) (os.FileInfo, error) {
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpStatPacket{
		ID:   id,
		Path: p,
	})
	if err != nil {
		return nil, err
	}
	switch typ {
	case sshFxpAttrs:
		sid, data := unmarshalUint32(data)
		if sid != id {
			return nil, &unexpectedIDErr{id, sid}
		}
		attr, _ := unmarshalAttrs(data)
		return fileInfoFromStat(attr, path.Base(p)), nil
	case sshFxpStatus:
		return nil, normaliseError(unmarshalStatus(id, data))
	default:
		return nil, unimplementedPacketErr(typ)
	}
}
|
|
|
|
|
|
2015-09-08 14:04:52 +08:00
|
|
|
|
// Lstat returns a FileInfo structure describing the file specified by path 'p'.
|
|
|
|
|
// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link.
|
2013-11-06 12:40:35 +08:00
|
|
|
|
func (c *Client) Lstat(p string) (os.FileInfo, error) {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
id := c.nextID()
|
2020-10-29 06:20:19 +08:00
|
|
|
|
typ, data, err := c.sendPacket(nil, sshFxpLstatPacket{
|
2016-01-05 05:15:21 +08:00
|
|
|
|
ID: id,
|
2013-11-06 12:40:35 +08:00
|
|
|
|
Path: p,
|
2013-11-08 18:56:25 +08:00
|
|
|
|
})
|
2013-11-05 19:16:36 +08:00
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
switch typ {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpAttrs:
|
2013-11-05 19:16:36 +08:00
|
|
|
|
sid, data := unmarshalUint32(data)
|
|
|
|
|
if sid != id {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
return nil, &unexpectedIDErr{id, sid}
|
2013-11-05 19:16:36 +08:00
|
|
|
|
}
|
|
|
|
|
attr, _ := unmarshalAttrs(data)
|
2014-06-23 11:11:28 +08:00
|
|
|
|
return fileInfoFromStat(attr, path.Base(p)), nil
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpStatus:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return nil, normaliseError(unmarshalStatus(id, data))
|
2013-11-05 19:16:36 +08:00
|
|
|
|
default:
|
|
|
|
|
return nil, unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
}
|
2013-11-06 08:04:26 +08:00
|
|
|
|
|
2014-09-24 02:42:28 +08:00
|
|
|
|
// ReadLink reads the target of a symbolic link.
func (c *Client) ReadLink(p string) (string, error) {
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpReadlinkPacket{
		ID:   id,
		Path: p,
	})
	if err != nil {
		return "", err
	}
	switch typ {
	case sshFxpName:
		sid, data := unmarshalUint32(data)
		if sid != id {
			return "", &unexpectedIDErr{id, sid}
		}
		// SSH_FXP_READLINK replies with a name list that must
		// contain exactly one entry.
		count, data := unmarshalUint32(data)
		if count != 1 {
			return "", unexpectedCount(1, count)
		}
		filename, _ := unmarshalString(data) // ignore dummy attributes
		return filename, nil
	case sshFxpStatus:
		return "", normaliseError(unmarshalStatus(id, data))
	default:
		return "", unimplementedPacketErr(typ)
	}
}
|
|
|
|
|
|
2019-05-25 03:23:18 +08:00
|
|
|
|
// Link creates a hard link at 'newname', pointing at the same inode as 'oldname'
func (c *Client) Link(oldname, newname string) error {
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpHardlinkPacket{
		ID:      id,
		Oldpath: oldname,
		Newpath: newname,
	})
	if err != nil {
		return err
	}
	switch typ {
	case sshFxpStatus:
		return normaliseError(unmarshalStatus(id, data))
	default:
		return unimplementedPacketErr(typ)
	}
}
|
|
|
|
|
|
2015-09-07 16:05:16 +08:00
|
|
|
|
// Symlink creates a symbolic link at 'newname', pointing at target 'oldname'
|
|
|
|
|
func (c *Client) Symlink(oldname, newname string) error {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
id := c.nextID()
|
2020-10-29 06:20:19 +08:00
|
|
|
|
typ, data, err := c.sendPacket(nil, sshFxpSymlinkPacket{
|
2016-01-05 05:15:21 +08:00
|
|
|
|
ID: id,
|
2015-09-07 16:05:16 +08:00
|
|
|
|
Linkpath: newname,
|
|
|
|
|
Targetpath: oldname,
|
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
switch typ {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpStatus:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return normaliseError(unmarshalStatus(id, data))
|
2015-09-07 16:05:16 +08:00
|
|
|
|
default:
|
|
|
|
|
return unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-08-22 15:51:12 +08:00
|
|
|
|
// setfstat changes attributes on an open file handle via SSH_FXP_FSETSTAT.
// flags selects which attributes are present in attrs.
func (c *Client) setfstat(handle string, flags uint32, attrs interface{}) error {
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpFsetstatPacket{
		ID:     id,
		Handle: handle,
		Flags:  flags,
		Attrs:  attrs,
	})
	if err != nil {
		return err
	}
	switch typ {
	case sshFxpStatus:
		return normaliseError(unmarshalStatus(id, data))
	default:
		return unimplementedPacketErr(typ)
	}
}
|
|
|
|
|
|
2014-06-22 08:48:40 +08:00
|
|
|
|
// setstat is a convenience wrapper to allow for changing of various parts of the file descriptor.
// flags selects which attributes are present in attrs.
func (c *Client) setstat(path string, flags uint32, attrs interface{}) error {
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpSetstatPacket{
		ID:    id,
		Path:  path,
		Flags: flags,
		Attrs: attrs,
	})
	if err != nil {
		return err
	}
	switch typ {
	case sshFxpStatus:
		return normaliseError(unmarshalStatus(id, data))
	default:
		return unimplementedPacketErr(typ)
	}
}
|
|
|
|
|
|
2014-06-22 13:25:40 +08:00
|
|
|
|
// Chtimes changes the access and modification times of the named file.
|
2014-06-22 08:48:40 +08:00
|
|
|
|
func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error {
|
2014-06-22 13:25:40 +08:00
|
|
|
|
type times struct {
|
|
|
|
|
Atime uint32
|
|
|
|
|
Mtime uint32
|
|
|
|
|
}
|
2014-06-23 04:01:04 +08:00
|
|
|
|
attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())}
|
2019-08-30 23:04:37 +08:00
|
|
|
|
return c.setstat(path, sshFileXferAttrACmodTime, attrs)
|
2014-06-22 13:25:40 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Chown changes the user and group owners of the named file.
|
|
|
|
|
func (c *Client) Chown(path string, uid, gid int) error {
|
|
|
|
|
type owner struct {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
UID uint32
|
|
|
|
|
GID uint32
|
2014-06-22 13:25:40 +08:00
|
|
|
|
}
|
2014-06-23 04:01:04 +08:00
|
|
|
|
attrs := owner{uint32(uid), uint32(gid)}
|
2019-08-30 23:04:37 +08:00
|
|
|
|
return c.setstat(path, sshFileXferAttrUIDGID, attrs)
|
2014-06-22 13:25:40 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Chmod changes the permissions of the named file.
func (c *Client) Chmod(path string, mode os.FileMode) error {
	return c.setstat(path, sshFileXferAttrPermissions, uint32(mode))
}
|
|
|
|
|
|
|
|
|
|
// Truncate sets the size of the named file. Although it may be safely assumed
// that if the size is less than its current size it will be truncated to fit,
// the SFTP protocol does not specify what behavior the server should do when setting
// size greater than the current size.
func (c *Client) Truncate(path string, size int64) error {
	return c.setstat(path, sshFileXferAttrSize, uint64(size))
}
|
|
|
|
|
|
2013-11-06 08:04:26 +08:00
|
|
|
|
// Open opens the named file for reading. If successful, methods on the
// returned file can be used for reading; the associated file descriptor
// has mode O_RDONLY.
func (c *Client) Open(path string) (*File, error) {
	return c.open(path, flags(os.O_RDONLY))
}
|
|
|
|
|
|
|
|
|
|
// OpenFile is the generalized open call; most users will use Open or
// Create instead. It opens the named file with specified flag (O_RDONLY
// etc.). If successful, methods on the returned File can be used for I/O.
func (c *Client) OpenFile(path string, f int) (*File, error) {
	return c.open(path, flags(f))
}
|
|
|
|
|
|
|
|
|
|
// open issues an SSH_FXP_OPEN request with the given protocol-level pflags
// (see flags()) and wraps the returned handle in a *File.
func (c *Client) open(path string, pflags uint32) (*File, error) {
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpOpenPacket{
		ID:     id,
		Path:   path,
		Pflags: pflags,
	})
	if err != nil {
		return nil, err
	}
	switch typ {
	case sshFxpHandle:
		sid, data := unmarshalUint32(data)
		if sid != id {
			return nil, &unexpectedIDErr{id, sid}
		}
		handle, _ := unmarshalString(data)
		return &File{c: c, path: path, handle: handle}, nil
	case sshFxpStatus:
		return nil, normaliseError(unmarshalStatus(id, data))
	default:
		return nil, unimplementedPacketErr(typ)
	}
}
|
|
|
|
|
|
2013-11-06 08:30:01 +08:00
|
|
|
|
// close closes a handle previously returned in the response
// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid
// immediately after this request has been sent.
func (c *Client) close(handle string) error {
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpClosePacket{
		ID:     id,
		Handle: handle,
	})
	if err != nil {
		return err
	}
	switch typ {
	case sshFxpStatus:
		return normaliseError(unmarshalStatus(id, data))
	default:
		return unimplementedPacketErr(typ)
	}
}
|
2013-11-06 09:36:05 +08:00
|
|
|
|
|
2014-06-23 11:11:28 +08:00
|
|
|
|
// fstat issues an SSH_FXP_FSTAT request for an open handle and returns the
// raw attribute structure reported by the server.
func (c *Client) fstat(handle string) (*FileStat, error) {
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpFstatPacket{
		ID:     id,
		Handle: handle,
	})
	if err != nil {
		return nil, err
	}
	switch typ {
	case sshFxpAttrs:
		sid, data := unmarshalUint32(data)
		if sid != id {
			return nil, &unexpectedIDErr{id, sid}
		}
		attr, _ := unmarshalAttrs(data)
		return attr, nil
	case sshFxpStatus:
		return nil, normaliseError(unmarshalStatus(id, data))
	default:
		return nil, unimplementedPacketErr(typ)
	}
}
|
2013-11-06 09:42:14 +08:00
|
|
|
|
|
2016-01-08 04:11:16 +08:00
|
|
|
|
// StatVFS retrieves VFS statistics from a remote host.
//
// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature
// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt.
func (c *Client) StatVFS(path string) (*StatVFS, error) {
	// send the StatVFS packet to the server
	id := c.nextID()
	typ, data, err := c.sendPacket(nil, sshFxpStatvfsPacket{
		ID:   id,
		Path: path,
	})
	if err != nil {
		return nil, err
	}

	switch typ {
	// server responded with valid data
	case sshFxpExtendedReply:
		var response StatVFS
		// the reply is a fixed-layout struct of big-endian fields,
		// so binary.Read can decode it directly.
		err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response)
		if err != nil {
			return nil, errors.New("can not parse reply")
		}

		return &response, nil

	// the request failed
	case sshFxpStatus:
		return nil, normaliseError(unmarshalStatus(id, data))

	default:
		return nil, unimplementedPacketErr(typ)
	}
}
|
|
|
|
|
|
2013-11-07 14:23:51 +08:00
|
|
|
|
// Join joins any number of path elements into a single path, adding a
|
|
|
|
|
// separating slash if necessary. The result is Cleaned; in particular, all
|
|
|
|
|
// empty strings are ignored.
|
|
|
|
|
func (c *Client) Join(elem ...string) string { return path.Join(elem...) }
|
|
|
|
|
|
2013-11-23 21:00:17 +08:00
|
|
|
|
// Remove removes the specified file or directory. An error will be returned if no
|
|
|
|
|
// file or directory with the specified path exists, or if the specified directory
|
|
|
|
|
// is not empty.
|
2013-11-06 11:08:26 +08:00
|
|
|
|
func (c *Client) Remove(path string) error {
|
2013-11-23 21:00:17 +08:00
|
|
|
|
err := c.removeFile(path)
|
2020-12-08 21:43:19 +08:00
|
|
|
|
// some servers, *cough* osx *cough*, return EPERM, not ENODIR.
|
|
|
|
|
// serv-u returns ssh_FX_FILE_IS_A_DIRECTORY
|
|
|
|
|
// EPERM is converted to os.ErrPermission so it is not a StatusError
|
2016-09-08 18:00:35 +08:00
|
|
|
|
if err, ok := err.(*StatusError); ok {
|
2015-12-22 12:34:21 +08:00
|
|
|
|
switch err.Code {
|
2020-12-08 21:43:19 +08:00
|
|
|
|
case sshFxFailure, sshFxFileIsADirectory:
|
2016-10-21 22:04:00 +08:00
|
|
|
|
return c.RemoveDirectory(path)
|
2015-12-22 12:34:21 +08:00
|
|
|
|
}
|
2013-11-23 21:00:17 +08:00
|
|
|
|
}
|
2020-12-08 21:43:19 +08:00
|
|
|
|
if os.IsPermission(err) {
|
|
|
|
|
return c.RemoveDirectory(path)
|
|
|
|
|
}
|
2013-12-11 07:18:53 +08:00
|
|
|
|
return err
|
2013-11-23 21:00:17 +08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (c *Client) removeFile(path string) error {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
id := c.nextID()
|
2020-10-29 06:20:19 +08:00
|
|
|
|
typ, data, err := c.sendPacket(nil, sshFxpRemovePacket{
|
2016-01-05 05:15:21 +08:00
|
|
|
|
ID: id,
|
2013-11-06 11:08:26 +08:00
|
|
|
|
Filename: path,
|
2013-11-08 18:56:25 +08:00
|
|
|
|
})
|
2013-11-06 11:08:26 +08:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
switch typ {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpStatus:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return normaliseError(unmarshalStatus(id, data))
|
2013-11-06 11:08:26 +08:00
|
|
|
|
default:
|
|
|
|
|
return unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-21 22:04:00 +08:00
|
|
|
|
// RemoveDirectory removes a directory path.
|
|
|
|
|
func (c *Client) RemoveDirectory(path string) error {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
id := c.nextID()
|
2020-10-29 06:20:19 +08:00
|
|
|
|
typ, data, err := c.sendPacket(nil, sshFxpRmdirPacket{
|
2016-01-05 05:15:21 +08:00
|
|
|
|
ID: id,
|
2013-12-11 07:18:53 +08:00
|
|
|
|
Path: path,
|
2013-11-23 21:00:17 +08:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
switch typ {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpStatus:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return normaliseError(unmarshalStatus(id, data))
|
2013-11-23 21:00:17 +08:00
|
|
|
|
default:
|
|
|
|
|
return unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-11-06 11:15:26 +08:00
|
|
|
|
// Rename renames a file.
|
|
|
|
|
func (c *Client) Rename(oldname, newname string) error {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
id := c.nextID()
|
2020-10-29 06:20:19 +08:00
|
|
|
|
typ, data, err := c.sendPacket(nil, sshFxpRenamePacket{
|
2016-01-05 05:15:21 +08:00
|
|
|
|
ID: id,
|
2013-11-06 11:15:26 +08:00
|
|
|
|
Oldpath: oldname,
|
|
|
|
|
Newpath: newname,
|
2013-11-08 18:56:25 +08:00
|
|
|
|
})
|
2013-11-06 11:15:26 +08:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
switch typ {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpStatus:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return normaliseError(unmarshalStatus(id, data))
|
2013-11-06 11:15:26 +08:00
|
|
|
|
default:
|
|
|
|
|
return unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-05 19:46:03 +08:00
|
|
|
|
// PosixRename renames a file using the posix-rename@openssh.com extension
|
|
|
|
|
// which will replace newname if it already exists.
|
|
|
|
|
func (c *Client) PosixRename(oldname, newname string) error {
|
|
|
|
|
id := c.nextID()
|
2020-10-29 06:20:19 +08:00
|
|
|
|
typ, data, err := c.sendPacket(nil, sshFxpPosixRenamePacket{
|
2017-09-05 19:46:03 +08:00
|
|
|
|
ID: id,
|
|
|
|
|
Oldpath: oldname,
|
|
|
|
|
Newpath: newname,
|
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
switch typ {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpStatus:
|
2017-09-05 19:46:03 +08:00
|
|
|
|
return normaliseError(unmarshalStatus(id, data))
|
|
|
|
|
default:
|
|
|
|
|
return unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-12-22 05:45:40 +08:00
|
|
|
|
func (c *Client) realpath(path string) (string, error) {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
id := c.nextID()
|
2020-10-29 06:20:19 +08:00
|
|
|
|
typ, data, err := c.sendPacket(nil, sshFxpRealpathPacket{
|
2016-01-05 05:15:21 +08:00
|
|
|
|
ID: id,
|
2015-12-22 05:45:40 +08:00
|
|
|
|
Path: path,
|
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
|
|
|
|
switch typ {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpName:
|
2015-12-22 05:45:40 +08:00
|
|
|
|
sid, data := unmarshalUint32(data)
|
|
|
|
|
if sid != id {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
return "", &unexpectedIDErr{id, sid}
|
2015-12-22 05:45:40 +08:00
|
|
|
|
}
|
|
|
|
|
count, data := unmarshalUint32(data)
|
|
|
|
|
if count != 1 {
|
|
|
|
|
return "", unexpectedCount(1, count)
|
|
|
|
|
}
|
|
|
|
|
filename, _ := unmarshalString(data) // ignore attributes
|
|
|
|
|
return filename, nil
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpStatus:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return "", normaliseError(unmarshalStatus(id, data))
|
2015-12-22 05:45:40 +08:00
|
|
|
|
default:
|
|
|
|
|
return "", unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Getwd returns the current working directory of the server. Operations
// involving relative paths will be based at this location.
func (f *Client) Getwd() (string, error) {
	return f.realpath(".")
}
|
|
|
|
|
|
2016-01-08 04:11:16 +08:00
|
|
|
|
// Mkdir creates the specified directory. An error will be returned if a file or
|
2013-11-23 07:35:12 +08:00
|
|
|
|
// directory with the specified path already exists, or if the directory's
|
|
|
|
|
// parent folder does not exist (the method cannot create complete paths).
|
2013-11-23 21:00:17 +08:00
|
|
|
|
func (c *Client) Mkdir(path string) error {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
id := c.nextID()
|
2020-10-29 06:20:19 +08:00
|
|
|
|
typ, data, err := c.sendPacket(nil, sshFxpMkdirPacket{
|
2016-01-05 05:15:21 +08:00
|
|
|
|
ID: id,
|
2013-12-11 07:18:53 +08:00
|
|
|
|
Path: path,
|
2013-11-23 07:35:12 +08:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
switch typ {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxpStatus:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return normaliseError(unmarshalStatus(id, data))
|
2013-11-23 07:35:12 +08:00
|
|
|
|
default:
|
|
|
|
|
return unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-25 14:58:10 +08:00
|
|
|
|
// MkdirAll creates a directory named path, along with any necessary parents,
// and returns nil, or else returns an error.
// If path is already a directory, MkdirAll does nothing and returns nil.
// If path contains a regular file, an error is returned
func (c *Client) MkdirAll(path string) error {
	// Most of this code mimics https://golang.org/src/os/path.go?s=514:561#L13
	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	dir, err := c.Stat(path)
	if err == nil {
		if dir.IsDir() {
			return nil
		}
		// Path exists but is not a directory: mirror os.MkdirAll's ENOTDIR error.
		return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
	}

	// Slow path: make sure parent exists and then call Mkdir for path.
	i := len(path)
	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
		i--
	}

	j := i
	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
		j--
	}

	if j > 1 {
		// Create parent (recursively); path[0:j-1] is everything before the last element.
		err = c.MkdirAll(path[0 : j-1])
		if err != nil {
			return err
		}
	}

	// Parent now exists; invoke Mkdir and use its result.
	err = c.Mkdir(path)
	if err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
		dir, err1 := c.Lstat(path)
		if err1 == nil && dir.IsDir() {
			return nil
		}
		return err
	}
	return nil
}
|
|
|
|
|
|
2013-11-06 09:42:14 +08:00
|
|
|
|
// File represents a remote file.
type File struct {
	c      *Client // client session owning the remote handle
	path   string  // remote path as given to Open/Create
	handle string  // server-issued handle for the open file

	mu     sync.Mutex // serializes offset-based operations (Read/Write/WriteTo)
	offset int64      // current offset within remote file
}
|
|
|
|
|
|
|
|
|
|
// Close closes the File, rendering it unusable for I/O. It returns an
// error, if any.
func (f *File) Close() error {
	// Release the server-side handle associated with this file.
	return f.c.close(f.handle)
}
|
|
|
|
|
|
2015-09-25 15:55:20 +08:00
|
|
|
|
// Name returns the name of the file as presented to Open or Create.
func (f *File) Name() string {
	return f.path
}
|
|
|
|
|
|
2017-01-14 11:58:21 +08:00
|
|
|
|
// Read reads up to len(b) bytes from the File. It returns the number of bytes
// read and an error, if any. Read follows io.Reader semantics, so when Read
// encounters an error or EOF condition after successfully reading n > 0 bytes,
// it returns the number of bytes read.
//
// To maximise throughput for transferring the entire file (especially
// over high latency links) it is recommended to use WriteTo rather
// than calling Read multiple times. io.Copy will do this
// automatically.
func (f *File) Read(b []byte) (int, error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	// Advance the cursor by however many bytes were actually read,
	// even when err is non-nil, per io.Reader semantics.
	n, err := f.ReadAt(b, f.offset)
	f.offset += int64(n)
	return n, err
}
|
|
|
|
|
|
2020-10-29 22:32:10 +08:00
|
|
|
|
// readChunkAt attempts to read the whole entire length of the buffer from the file starting at the offset.
|
|
|
|
|
// It will continue progressively reading into the buffer until it fills the whole buffer, or an error occurs.
|
|
|
|
|
func (f *File) readChunkAt(ch chan result, b []byte, off int64) (n int, err error) {
|
|
|
|
|
for err == nil && n < len(b) {
|
|
|
|
|
id := f.c.nextID()
|
|
|
|
|
typ, data, err := f.c.sendPacket(ch, sshFxpReadPacket{
|
|
|
|
|
ID: id,
|
|
|
|
|
Handle: f.handle,
|
|
|
|
|
Offset: uint64(off) + uint64(n),
|
|
|
|
|
Len: uint32(len(b) - n),
|
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return n, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch typ {
|
|
|
|
|
case sshFxpStatus:
|
|
|
|
|
return n, normaliseError(unmarshalStatus(id, data))
|
|
|
|
|
|
|
|
|
|
case sshFxpData:
|
|
|
|
|
sid, data := unmarshalUint32(data)
|
|
|
|
|
if id != sid {
|
|
|
|
|
return n, &unexpectedIDErr{id, sid}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
l, data := unmarshalUint32(data)
|
|
|
|
|
n += copy(b[n:], data[:l])
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
return n, unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-08-22 15:51:12 +08:00
|
|
|
|
// ReadAt reads up to len(b) byte from the File at a given offset `off`. It returns
// the number of bytes read and an error, if any. ReadAt follows io.ReaderAt semantics,
// so the file offset is not altered during the read.
func (f *File) ReadAt(b []byte, off int64) (int, error) {
	if len(b) <= f.c.maxPacket {
		// This should be able to be serviced with 1/2 requests.
		// So, just do it directly.
		return f.readChunkAt(nil, b, off)
	}

	// Split the read into multiple maxPacket-sized concurrent reads bounded by maxConcurrentRequests.
	// This allows writes with a suitably large buffer to transfer data at a much faster rate
	// by overlapping round trip times.

	// cancel is closed by the collector below to stop the producer and workers early.
	cancel := make(chan struct{})

	type work struct {
		b   []byte
		off int64
	}
	workCh := make(chan work)

	// Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
	go func() {
		defer close(workCh)

		b := b
		offset := off
		chunkSize := f.c.maxPacket

		for len(b) > 0 {
			rb := b
			if len(rb) > chunkSize {
				rb = rb[:chunkSize]
			}

			select {
			case workCh <- work{rb, offset}:
			case <-cancel:
				return
			}

			offset += int64(len(rb))
			b = b[len(rb):]
		}
	}()

	type rErr struct {
		off int64
		err error
	}
	errCh := make(chan rErr)

	concurrency := len(b)/f.c.maxPacket + 1
	if concurrency > f.c.maxConcurrentRequests {
		concurrency = f.c.maxConcurrentRequests
	}

	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		// Map_i: each worker gets work, and does the Read into each buffer from its respective offset.
		go func() {
			defer wg.Done()

			ch := make(chan result, 1) // reusable channel per mapper.

			for packet := range workCh {
				n, err := f.readChunkAt(ch, packet.b, packet.off)
				if err != nil {
					// return the offset as the start + how much we read before the error.
					errCh <- rErr{packet.off + int64(n), err}
					return
				}
			}
		}()
	}

	// Wait for long tail, before closing results.
	go func() {
		wg.Wait()
		close(errCh)
	}()

	// collect all the results into a relevant return: the earliest offset to return an error.
	firstErr := rErr{math.MaxInt64, nil}
	for rErr := range errCh {
		if rErr.off <= firstErr.off {
			firstErr = rErr
		}

		// On the first error, close cancel (exactly once) so producer and
		// workers stop; keep draining errCh until the workers finish.
		select {
		case <-cancel:
		default:
			// stop any more work from being distributed. (Just in case.)
			close(cancel)
		}
	}

	if firstErr.err != nil {
		// firstErr.err != nil if and only if firstErr.off > our starting offset.
		return int(firstErr.off - off), firstErr.err
	}

	// As per spec for io.ReaderAt, we return nil error if and only if we read everything.
	return len(b), nil
}
|
|
|
|
|
|
2021-02-22 04:32:09 +08:00
|
|
|
|
// writeToSequential implements WriteTo, but works sequentially with no parallelism.
//
// It reads maxPacket-sized chunks starting at f.offset and copies them to w,
// advancing f.offset as it goes, until readChunkAt reports io.EOF (mapped to
// a nil return) or some other error.
func (f *File) writeToSequential(w io.Writer) (written int64, err error) {
	b := make([]byte, f.c.maxPacket)
	ch := make(chan result, 1) // reusable channel

	for {
		n, err := f.readChunkAt(ch, b, f.offset)
		if n < 0 {
			panic("sftp.File: returned negative count from readChunkAt")
		}

		if n > 0 {
			f.offset += int64(n)

			m, err2 := w.Write(b[:n])
			written += int64(m)

			// Prefer the read error (e.g. io.EOF); fall back to the write error.
			if err == nil {
				err = err2
			}
		}

		if err != nil {
			if err == io.EOF {
				return written, nil // return nil explicitly.
			}

			return written, err
		}
	}
}
|
|
|
|
|
|
2021-02-22 04:32:09 +08:00
|
|
|
|
// WriteTo writes the file to the given Writer.
// The return value is the number of bytes written.
// Any error encountered during the write is also returned.
//
// This method is preferred over calling Read multiple times
// to maximise throughput for transferring the entire file,
// especially over high latency links.
func (f *File) WriteTo(w io.Writer) (written int64, err error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	// While working concurrently, we want to guess how many concurrent workers we should use.
	var fileSize uint64
	if f.c.useFstat {
		fileStat, err := f.c.fstat(f.handle)
		if err != nil {
			return 0, err
		}
		fileSize = fileStat.Size
	} else {
		fi, err := f.c.Stat(f.path)
		if err != nil {
			return 0, err
		}
		fileSize = uint64(fi.Size())
	}

	if fileSize <= uint64(f.c.maxPacket) {
		// We should be able to handle this in one Read.
		return f.writeToSequential(w)
	}

	concurrency := int(fileSize/uint64(f.c.maxPacket) + 1) // a bad guess, but better than no guess
	if concurrency > f.c.maxConcurrentRequests {
		concurrency = f.c.maxConcurrentRequests
	}

	// If the writing Reduce phase has ended, then all work unconditionally needs to be thrown out.
	cancel := make(chan struct{})
	var wg sync.WaitGroup
	defer func() {
		close(cancel)

		// We want to wait until all outstanding goroutines with an `f` or `f.c` reference have completed.
		// Just to be sure we don’t orphan any goroutines any hanging references.
		wg.Wait()
	}()

	// writeWork links each read result to the next one via `next`, so the
	// Reduce phase can consume results in file order regardless of which
	// worker finished first.
	type writeWork struct {
		b   []byte
		n   int
		off int64
		err error

		next chan writeWork
	}
	writeCh := make(chan writeWork)

	type readWork struct {
		off       int64
		cur, next chan writeWork
	}
	readCh := make(chan readWork)

	// Scratch buffers are pooled and recycled by the Reduce phase below.
	pool := sync.Pool{
		New: func() interface{} {
			return make([]byte, f.c.maxPacket)
		},
	}

	// Slice: hand out chunks of work on demand, with a `cur` and `next` channel built-in for sequencing.
	go func(off int64, chunkSize int) {
		// We pass in arguments, so this goroutine never has to reference `f` or `f.c`.

		cur := writeCh

		for {
			next := make(chan writeWork)
			readWork := readWork{
				off:  off,
				cur:  cur,
				next: next,
			}
			off += int64(chunkSize)
			cur = next

			select {
			case <-cancel:
				return
			case readCh <- readWork:
			}
		}
	}(f.offset, f.c.maxPacket)

	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		// Map_i: each worker gets readWork, and does the Read into a buffer at the given offset.
		go func() {
			defer wg.Done()

			ch := make(chan result, 1) // reusable channel

			for {
				var readWork readWork

				select {
				case <-cancel:
					return
				case readWork = <-readCh:
				}

				b := pool.Get().([]byte)

				n, err := f.readChunkAt(ch, b, readWork.off)
				if n < 0 {
					panic("sftp.File: returned negative count from readChunkAt")
				}

				writeWork := writeWork{
					b:   b,
					n:   n,
					off: readWork.off + int64(n),
					err: err,

					next: readWork.next,
				}

				select {
				case readWork.cur <- writeWork:
				case <-cancel:
					return
				}

				if err != nil {
					//close(cur)
					return
				}
			}
		}()
	}

	// Track the last offset we actually delivered, so f.offset is restored
	// correctly on every exit path.
	lastOff := f.offset
	defer func() {
		f.offset = lastOff
	}()

	// Reduce: serialize the results from the reads into sequential writes.
	cur := writeCh
	for {
		packet, ok := <-cur
		if !ok {
			return written, nil
		}

		// Because writeWork is serialized, we know we will visit these packets in order.
		lastOff = packet.off

		if packet.n > 0 {
			n, err := w.Write(packet.b[:packet.n])
			written += int64(n)
			if err != nil {
				return written, err
			}
		}

		if packet.err != nil {
			if packet.err == io.EOF {
				return written, nil
			}

			return written, packet.err
		}

		pool.Put(packet.b)
		cur = packet.next
	}
}
|
|
|
|
|
|
|
|
|
|
// Stat returns the FileInfo structure describing the open file.
// If there is an error, it is returned from the underlying SSH_FXP_FSTAT
// request unmodified.
func (f *File) Stat() (os.FileInfo, error) {
	fs, err := f.c.fstat(f.handle)
	if err != nil {
		return nil, err
	}
	return fileInfoFromStat(fs, path.Base(f.path)), nil
}
|
|
|
|
|
|
2013-11-06 10:04:40 +08:00
|
|
|
|
// Write writes len(b) bytes to the File. It returns the number of bytes
// written and an error, if any. Write returns a non-nil error when n !=
// len(b).
//
// To maximise throughput for transferring the entire file (especially
// over high latency links) it is recommended to use ReadFrom rather
// than calling Write multiple times. io.Copy will do this
// automatically.
func (f *File) Write(b []byte) (int, error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	// Advance the cursor by however many bytes were actually written,
	// even when err is non-nil, mirroring Read.
	n, err := f.WriteAt(b, f.offset)
	f.offset += int64(n)
	return n, err
}
|
|
|
|
|
|
|
|
|
|
func (f *File) writeChunkAt(ch chan result, b []byte, off int64) (int, error) {
|
|
|
|
|
typ, data, err := f.c.sendPacket(ch, sshFxpWritePacket{
|
|
|
|
|
ID: f.c.nextID(),
|
|
|
|
|
Handle: f.handle,
|
|
|
|
|
Offset: uint64(off),
|
|
|
|
|
Length: uint32(len(b)),
|
|
|
|
|
Data: b,
|
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return 0, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch typ {
|
|
|
|
|
case sshFxpStatus:
|
|
|
|
|
id, _ := unmarshalUint32(data)
|
|
|
|
|
err := normaliseError(unmarshalStatus(id, data))
|
|
|
|
|
if err != nil {
|
|
|
|
|
return 0, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
return 0, unimplementedPacketErr(typ)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return len(b), nil
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-22 04:32:09 +08:00
|
|
|
|
// writeAtConcurrent implements WriterAt, but works concurrently rather than sequentially.
func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) {
	// Split the write into multiple maxPacket sized concurrent writes
	// bounded by maxConcurrentRequests. This allows writes with a suitably
	// large buffer to transfer data at a much faster rate due to
	// overlapping round trip times.

	// cancel is closed by the collector below to stop the producer early.
	cancel := make(chan struct{})

	type work struct {
		b   []byte
		off int64
	}
	workCh := make(chan work)

	// Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
	go func() {
		defer close(workCh)

		var read int
		chunkSize := f.c.maxPacket

		for read < len(b) {
			wb := b[read:]
			if len(wb) > chunkSize {
				wb = wb[:chunkSize]
			}

			select {
			case workCh <- work{wb, off + int64(read)}:
			case <-cancel:
				return
			}

			read += len(wb)
		}
	}()

	type wErr struct {
		off int64
		err error
	}
	errCh := make(chan wErr)

	concurrency := len(b)/f.c.maxPacket + 1
	if concurrency > f.c.maxConcurrentRequests {
		concurrency = f.c.maxConcurrentRequests
	}

	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		// Map_i: each worker gets work, and does the Write from each buffer to its respective offset.
		go func() {
			defer wg.Done()

			ch := make(chan result, 1) // reusable channel per mapper.

			for packet := range workCh {
				n, err := f.writeChunkAt(ch, packet.b, packet.off)
				if err != nil {
					// return the offset as the start + how much we wrote before the error.
					errCh <- wErr{packet.off + int64(n), err}
				}
			}
		}()
	}

	// Wait for long tail, before closing results.
	go func() {
		wg.Wait()
		close(errCh)
	}()

	// Reduce: collect all the results into a relevant return: the earliest offset to return an error.
	firstErr := wErr{math.MaxInt64, nil}
	for wErr := range errCh {
		if wErr.off <= firstErr.off {
			firstErr = wErr
		}

		// On the first error, close cancel (exactly once) so the producer
		// stops; keep draining errCh until the workers finish.
		select {
		case <-cancel:
		default:
			// stop any more work from being distributed. (Just in case.)
			close(cancel)
		}
	}

	if firstErr.err != nil {
		// firstErr.err != nil if and only if firstErr.off >= our starting offset.
		return int(firstErr.off - off), firstErr.err
	}

	return len(b), nil
}
|
|
|
|
|
|
2021-02-22 04:32:09 +08:00
|
|
|
|
// WriteAt writes up to len(b) byte to the File at a given offset `off`. It returns
// the number of bytes written and an error, if any. WriteAt follows io.WriterAt semantics,
// so the file offset is not altered during the write.
func (f *File) WriteAt(b []byte, off int64) (int, error) {
	if len(b) <= f.c.maxPacket {
		// We can do this in one write.
		return f.writeChunkAt(nil, b, off)
	}

	if f.c.useConcurrentWrites {
		return f.writeAtConcurrent(b, off)
	}

	// Sequential fallback: issue one maxPacket-sized write at a time.
	ch := make(chan result, 1) // reusable channel

	var written int
	chunkSize := f.c.maxPacket

	for written < len(b) {
		wb := b[written:]
		if len(wb) > chunkSize {
			wb = wb[:chunkSize]
		}

		n, err := f.writeChunkAt(ch, wb, off+int64(written))
		if n > 0 {
			written += n
		}

		if err != nil {
			return written, err
		}
	}

	return len(b), nil
}
|
2020-11-26 06:36:59 +08:00
|
|
|
|
|
2021-02-22 04:32:09 +08:00
|
|
|
|
// readFromConcurrent implements ReaderFrom, but works concurrently rather than sequentially.
func (f *File) readFromConcurrent(r io.Reader, remain int64) (read int64, err error) {
	// Split the write into multiple maxPacket sized concurrent writes.
	// This allows writes with a suitably large reader
	// to transfer data at a much faster rate due to overlapping round trip times.
	//
	// remain is only a sizing hint for the worker count;
	// the reader is drained until EOF (or error) regardless of its value.

	// cancel is closed (at most once, by the reducer below) to tell the
	// producer goroutine to stop handing out more work after an error.
	cancel := make(chan struct{})

	// work is one chunk to write: buffer b, valid length n, file offset off.
	type work struct {
		b   []byte
		n   int
		off int64
	}
	workCh := make(chan work)

	// rwErr pairs an error with the file offset at which it occurred.
	type rwErr struct {
		off int64
		err error
	}
	errCh := make(chan rwErr)

	// Recycle maxPacket-sized buffers between the producer and the workers.
	pool := sync.Pool{
		New: func() interface{} {
			return make([]byte, f.c.maxPacket)
		},
	}

	// Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
	go func() {
		defer close(workCh)

		offset := f.offset

		for {
			b := pool.Get().([]byte)

			n, err := r.Read(b)

			if n > 0 {
				read += int64(n)

				select {
				case workCh <- work{b, n, offset}:
					// We need the pool.Put(b) to put the whole slice, not just the [:n] truncation.
				case <-cancel:
					return
				}

				offset += int64(n)
			}

			if err != nil {
				// io.EOF is a clean end of input, not an error to report.
				if err != io.EOF {
					errCh <- rwErr{offset, err}
				}
				return
			}
		}
	}()

	concurrency := int(remain/int64(f.c.maxPacket) + 1) // a bad guess, but better than no guess
	if concurrency > f.c.maxConcurrentRequests {
		concurrency = f.c.maxConcurrentRequests
	}

	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		// Map_i: each worker gets work, and does the Write from each buffer to its respective offset.
		go func() {
			defer wg.Done()

			ch := make(chan result, 1) // reusable channel per mapper.

			for packet := range workCh {
				n, err := f.writeChunkAt(ch, packet.b[:packet.n], packet.off)
				if err != nil {
					// return the offset as the start + how much we wrote before the error.
					errCh <- rwErr{packet.off + int64(n), err}
				}
				pool.Put(packet.b)
			}
		}()
	}

	// Wait for long tail, before closing results.
	go func() {
		wg.Wait()
		close(errCh)
	}()

	// Reduce: collect all the results into a relevant return: the earliest offset to return an error.
	firstErr := rwErr{math.MaxInt64, nil}
	for rwErr := range errCh {
		if rwErr.off <= firstErr.off {
			firstErr = rwErr
		}

		select {
		case <-cancel:
			// Already cancelled; keep draining errCh so the workers can exit.
		default:
			// stop any more work from being distributed.
			close(cancel)
		}
	}

	if firstErr.err != nil {
		// firstErr.err != nil if and only if firstErr.off is a valid offset.
		//
		// firstErr.off will then be the lesser of:
		// * the last successfully written offset,
		// * the last successfully read offset.
		//
		// This could be less than the last successfully written offset,
		// which is the whole reason for the UseConcurrentWrites() ClientOption.
		// Callers are responsible for truncating any SFTP files to the correct length.
		f.offset = firstErr.off

		// ReadFrom is defined to return the read bytes, regardless of any writer errors.
		return read, firstErr.err
	}

	f.offset += read
	return read, nil
}
|
|
|
|
|
|
2021-02-22 04:32:09 +08:00
|
|
|
|
// ReadFrom reads data from r until EOF and writes it to the file. The return
|
|
|
|
|
// value is the number of bytes read. Any error except io.EOF encountered
|
|
|
|
|
// during the read is also returned.
|
|
|
|
|
//
|
|
|
|
|
// This method is preferred over calling Write multiple times
|
|
|
|
|
// to maximise throughput for transferring the entire file,
|
|
|
|
|
// especially over high-latency links.
|
|
|
|
|
func (f *File) ReadFrom(r io.Reader) (int64, error) {
|
|
|
|
|
f.mu.Lock()
|
|
|
|
|
defer f.mu.Unlock()
|
|
|
|
|
|
|
|
|
|
if f.c.useConcurrentWrites {
|
|
|
|
|
var remain int64
|
|
|
|
|
switch r := r.(type) {
|
|
|
|
|
case interface{ Len() int }:
|
|
|
|
|
remain = int64(r.Len())
|
|
|
|
|
|
|
|
|
|
case *io.LimitedReader:
|
|
|
|
|
remain = r.N
|
|
|
|
|
|
|
|
|
|
case *os.File:
|
|
|
|
|
// For files, always presume max concurrency.
|
|
|
|
|
remain = math.MaxInt64
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if remain > int64(f.c.maxPacket) {
|
|
|
|
|
// Only use concurrency, if it would be at least two read/writes.
|
|
|
|
|
return f.readFromConcurrent(r, remain)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ch := make(chan result, 1) // reusable channel
|
|
|
|
|
|
|
|
|
|
b := make([]byte, f.c.maxPacket)
|
|
|
|
|
|
|
|
|
|
var read int64
|
|
|
|
|
for {
|
|
|
|
|
n, err := r.Read(b)
|
|
|
|
|
if n < 0 {
|
|
|
|
|
panic("sftp.File: reader returned negative count from Read")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if n > 0 {
|
|
|
|
|
read += int64(n)
|
|
|
|
|
|
|
|
|
|
m, err2 := f.writeChunkAt(ch, b[:n], f.offset)
|
|
|
|
|
f.offset += int64(m)
|
|
|
|
|
|
|
|
|
|
if err == nil {
|
|
|
|
|
err = err2
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
if err == io.EOF {
|
|
|
|
|
return read, nil // return nil explicitly.
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return read, err
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-06-12 03:17:53 +08:00
|
|
|
|
// Seek implements io.Seeker by setting the client offset for the next Read or
|
2014-06-13 16:22:04 +08:00
|
|
|
|
// Write. It returns the next offset read. Seeking before or after the end of
|
|
|
|
|
// the file is undefined. Seeking relative to the end calls Stat.
|
2014-06-12 03:17:53 +08:00
|
|
|
|
func (f *File) Seek(offset int64, whence int) (int64, error) {
|
2021-02-22 04:32:09 +08:00
|
|
|
|
f.mu.Lock()
|
|
|
|
|
defer f.mu.Unlock()
|
|
|
|
|
|
2014-06-12 03:17:53 +08:00
|
|
|
|
switch whence {
|
2018-02-16 02:27:33 +08:00
|
|
|
|
case io.SeekStart:
|
|
|
|
|
case io.SeekCurrent:
|
2021-02-22 04:32:09 +08:00
|
|
|
|
offset += f.offset
|
2018-02-16 02:27:33 +08:00
|
|
|
|
case io.SeekEnd:
|
2014-06-12 03:17:53 +08:00
|
|
|
|
fi, err := f.Stat()
|
|
|
|
|
if err != nil {
|
2021-02-22 04:32:09 +08:00
|
|
|
|
return f.offset, err
|
2014-06-12 03:17:53 +08:00
|
|
|
|
}
|
2021-02-22 04:32:09 +08:00
|
|
|
|
offset += fi.Size()
|
2014-06-12 03:17:53 +08:00
|
|
|
|
default:
|
2021-02-22 04:32:09 +08:00
|
|
|
|
return f.offset, unimplementedSeekWhence(whence)
|
2014-06-12 03:17:53 +08:00
|
|
|
|
}
|
2021-02-22 04:32:09 +08:00
|
|
|
|
|
|
|
|
|
if offset < 0 {
|
|
|
|
|
return f.offset, os.ErrInvalid
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
f.offset = offset
|
|
|
|
|
return f.offset, nil
|
2014-06-12 03:17:53 +08:00
|
|
|
|
}
|
|
|
|
|
|
2014-06-22 13:48:11 +08:00
|
|
|
|
// Chown changes the uid/gid of the current file.
//
// NOTE(review): this delegates to Client.Chown using the file's path rather
// than its open handle, so it affects whatever the path currently names.
func (f *File) Chown(uid, gid int) error {
	return f.c.Chown(f.path, uid, gid)
}
|
|
|
|
|
|
|
|
|
|
// Chmod changes the permissions of the current file.
//
// NOTE(review): this delegates to Client.Chmod using the file's path rather
// than its open handle, so it affects whatever the path currently names.
func (f *File) Chmod(mode os.FileMode) error {
	return f.c.Chmod(f.path, mode)
}
|
|
|
|
|
|
2020-10-11 20:03:31 +08:00
|
|
|
|
// Sync requests a flush of the contents of a File to stable storage.
|
|
|
|
|
//
|
|
|
|
|
// Sync requires the server to support the fsync@openssh.com extension.
|
|
|
|
|
func (f *File) Sync() error {
|
|
|
|
|
id := f.c.nextID()
|
2020-10-29 06:20:19 +08:00
|
|
|
|
typ, data, err := f.c.sendPacket(nil, sshFxpFsyncPacket{
|
2020-10-11 20:03:31 +08:00
|
|
|
|
ID: id,
|
|
|
|
|
Handle: f.handle,
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
switch {
|
|
|
|
|
case err != nil:
|
|
|
|
|
return err
|
|
|
|
|
case typ == sshFxpStatus:
|
|
|
|
|
return normaliseError(unmarshalStatus(id, data))
|
|
|
|
|
default:
|
|
|
|
|
return &unexpectedPacketErr{want: sshFxpStatus, got: typ}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-06-22 13:48:11 +08:00
|
|
|
|
// Truncate sets the size of the current file. Although it may be safely assumed
// that if the size is less than its current size it will be truncated to fit,
// the SFTP protocol does not specify what behavior the server should do when setting
// size greater than the current size.
// We send a SSH_FXP_FSETSTAT here since we have a file handle.
func (f *File) Truncate(size int64) error {
	return f.c.setfstat(f.handle, sshFileXferAttrSize, uint64(size))
}
|
|
|
|
|
|
2013-11-08 18:24:50 +08:00
|
|
|
|
// min returns the smaller of the two ints.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
|
|
|
|
|
|
2016-01-04 03:54:19 +08:00
|
|
|
|
// normaliseError normalises an error into a more standard form that can be
|
|
|
|
|
// checked against stdlib errors like io.EOF or os.ErrNotExist.
|
|
|
|
|
func normaliseError(err error) error {
|
|
|
|
|
switch err := err.(type) {
|
|
|
|
|
case *StatusError:
|
|
|
|
|
switch err.Code {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxEOF:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return io.EOF
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxNoSuchFile:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return os.ErrNotExist
|
2020-12-08 19:14:21 +08:00
|
|
|
|
case sshFxPermissionDenied:
|
|
|
|
|
return os.ErrPermission
|
2019-08-30 23:04:37 +08:00
|
|
|
|
case sshFxOk:
|
2016-01-04 03:54:19 +08:00
|
|
|
|
return nil
|
|
|
|
|
default:
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
default:
|
|
|
|
|
return err
|
2013-11-06 12:00:04 +08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-11-06 11:15:26 +08:00
|
|
|
|
func unmarshalStatus(id uint32, data []byte) error {
|
|
|
|
|
sid, data := unmarshalUint32(data)
|
|
|
|
|
if sid != id {
|
2016-01-05 05:15:21 +08:00
|
|
|
|
return &unexpectedIDErr{id, sid}
|
2013-11-06 11:15:26 +08:00
|
|
|
|
}
|
|
|
|
|
code, data := unmarshalUint32(data)
|
2016-06-24 06:35:20 +08:00
|
|
|
|
msg, data, _ := unmarshalStringSafe(data)
|
2015-10-27 19:16:11 +08:00
|
|
|
|
lang, _, _ := unmarshalStringSafe(data)
|
2013-11-06 11:15:26 +08:00
|
|
|
|
return &StatusError{
|
|
|
|
|
Code: code,
|
|
|
|
|
msg: msg,
|
|
|
|
|
lang: lang,
|
|
|
|
|
}
|
|
|
|
|
}
|
2013-11-14 12:32:21 +08:00
|
|
|
|
|
2015-07-25 16:19:29 +08:00
|
|
|
|
func marshalStatus(b []byte, err StatusError) []byte {
|
|
|
|
|
b = marshalUint32(b, err.Code)
|
|
|
|
|
b = marshalString(b, err.msg)
|
|
|
|
|
b = marshalString(b, err.lang)
|
|
|
|
|
return b
|
|
|
|
|
}
|
|
|
|
|
|
2013-11-14 12:32:21 +08:00
|
|
|
|
// flags converts the flags passed to OpenFile into ssh flags.
|
|
|
|
|
// Unsupported flags are ignored.
|
|
|
|
|
func flags(f int) uint32 {
|
|
|
|
|
var out uint32
|
|
|
|
|
switch f & os.O_WRONLY {
|
|
|
|
|
case os.O_WRONLY:
|
2019-08-30 23:04:37 +08:00
|
|
|
|
out |= sshFxfWrite
|
2013-11-14 12:32:21 +08:00
|
|
|
|
case os.O_RDONLY:
|
2019-08-30 23:04:37 +08:00
|
|
|
|
out |= sshFxfRead
|
2013-11-14 12:32:21 +08:00
|
|
|
|
}
|
|
|
|
|
if f&os.O_RDWR == os.O_RDWR {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
out |= sshFxfRead | sshFxfWrite
|
2013-11-14 12:32:21 +08:00
|
|
|
|
}
|
|
|
|
|
if f&os.O_APPEND == os.O_APPEND {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
out |= sshFxfAppend
|
2013-11-14 12:32:21 +08:00
|
|
|
|
}
|
|
|
|
|
if f&os.O_CREATE == os.O_CREATE {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
out |= sshFxfCreat
|
2013-11-14 12:32:21 +08:00
|
|
|
|
}
|
|
|
|
|
if f&os.O_TRUNC == os.O_TRUNC {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
out |= sshFxfTrunc
|
2013-11-14 12:32:21 +08:00
|
|
|
|
}
|
|
|
|
|
if f&os.O_EXCL == os.O_EXCL {
|
2019-08-30 23:04:37 +08:00
|
|
|
|
out |= sshFxfExcl
|
2013-11-14 12:32:21 +08:00
|
|
|
|
}
|
|
|
|
|
return out
|
|
|
|
|
}
|